ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/asr/bcm/asr-sha.c b/marvell/linux/drivers/crypto/asr/bcm/asr-sha.c
new file mode 100644
index 0000000..acaac25
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm/asr-sha.c
@@ -0,0 +1,1223 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#include "asr-bcm.h"
+#include "asr-sha.h"
+
+// #define ASR_BCM_SHA_TEST
+
+static struct asr_bcm_sha *asr_sha_local = NULL;
+
+static inline u32 asr_sha_read(struct asr_bcm_sha *dd, u32 offset)
+{
+ u32 value = readl_relaxed(dd->io_base + offset);
+
+ return value;
+}
+
+static inline void asr_sha_write(struct asr_bcm_sha *dd,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, dd->io_base + offset);
+}
+
+/* ------- bcm sha hardware operation -------- */
+static void hash_sw_reset(struct asr_bcm_sha *dd)
+{
+ uint32_t val;
+
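+	/* pulse the soft-reset bit (bit 3) of HASH_CONTROL: set, then clear */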
+ val = (0x1 << 0x3);
+ asr_sha_write(dd, HASH_CONTROL, val);
+ val = 0x0;
+ asr_sha_write(dd, HASH_CONTROL, val);
+
+ return;
+}
+
+static int hash_set_mode(struct asr_bcm_sha *dd,
+			 HASH_MODE_T mode, HASH_ALGO_T algo)
+{
+ uint32_t val;
+
+ val = asr_sha_read(dd, HASH_CONFIG);
+ val &= ~0xf;
+ val |= algo;
+ if (mode == HASH_HMAC)
+ val |= (0x1 << 0x3);
+ asr_sha_write(dd, HASH_CONFIG, val);
+
+ return 0;
+}
+
+static int hash_kick(struct asr_bcm_sha *dd)
+{
+ uint32_t val;
+ uint32_t cnt;
+
+ val = asr_sha_read(dd, HASH_COMMAND);
+ val |= (0x1 << 0x0);
+ asr_sha_write(dd, HASH_COMMAND, val);
+
+ cnt = 1;
+	/* wait for the command/busy bits (bits 1-3 of HASH_STATUS) to clear */
+	do {
+		val = asr_sha_read(dd, HASH_STATUS);
+		if (cnt == 1000000) {
+			dev_err(dd->dev, "hash kick: still busy after %u polls, status 0x%08x\n", cnt, val);
+			return -ETIMEDOUT;
+		}
+		val &= 0xE;
+		udelay(1);
+		cnt++;
+	} while (val != 0);
+
+ cnt = 1;
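+	/* wait for bit 0 of HASH_STATUS (assumed completion/done bit) to assert */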
+	do {
+		val = asr_sha_read(dd, HASH_STATUS);
+		if (cnt == 1000000) {
+			dev_err(dd->dev, "hash kick: no completion after %u polls, status 0x%08x\n", cnt, val);
+			return -ETIMEDOUT;
+		}
+		val &= 0x1;
+		udelay(1);
+		cnt++;
+	} while (val == 0);
+
+ /* clear status so next command can be issued */
+ asr_sha_write(dd, HASH_STATUS, val);
+
+ return 0;
+}
+
+static int hash_config_op(struct asr_bcm_sha *dd, HASH_OP_MODE_T op_mode)
+{
+ uint32_t val;
+ int ret = 0;
+
+	if (op_mode < HASH_INIT || op_mode > HASH_FINAL)
+		return -EINVAL;
+
+ val = asr_sha_read(dd, HASH_CONTROL);
+ val &= ~(0x3 << 0x0);
+ val |= op_mode;
+ asr_sha_write(dd, HASH_CONTROL, val);
+
+ ret = hash_kick(dd);
+ return ret;
+}
+
+static int hash_save_context(struct asr_sha_reqctx *ctx, int alg)
+{
+ int i;
+ struct hash_state *md = &ctx->md;
+ struct asr_bcm_sha *dd = ctx->dd;
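+	/* SHA-384/512 state words are 64-bit: the low halves live in the
+	 * HASH_DIGEST bank and the high halves in HASH_DIGEST_H */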
+ switch(alg) {
+ case HASH_SHA384:
+ case HASH_SHA512:
+ for (i = 0; i < 8; i++) {
+ md->sha512.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
+ md->sha512.state[i+8] = asr_sha_read(dd, HASH_DIGEST_H(i));
+ }
+ break;
+ case HASH_SHA256:
+ case HASH_SHA224:
+ for (i = 0; i < 8; i++) {
+ md->sha256.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
+ }
+ break;
+ case HASH_SHA1:
+ for (i = 0; i < 5; i++) {
+ md->sha1.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
+ }
+ break;
+ case HASH_MD5:
+ for (i = 0; i < 4; i++) {
+ md->md5.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
+ }
+ break;
+	default:
+		dev_err(dd->dev, "hash save context: invalid algorithm\n");
+		return -EINVAL;
+ }
+ return 0;
+}
+
+static int hash_restore_context(struct asr_sha_reqctx *ctx, int alg)
+{
+ int i;
+ struct hash_state *md = &ctx->md;
+ struct asr_bcm_sha *dd = ctx->dd;
+
+ switch(alg) {
+ case HASH_SHA384:
+ case HASH_SHA512:
+ for (i = 0; i < 8; i++) {
+ asr_sha_write(dd, HASH_DIGEST(i), md->sha512.state[i]);
+ asr_sha_write(dd, HASH_DIGEST_H(i), md->sha512.state[i+8]);
+ }
+ break;
+ case HASH_SHA256:
+ case HASH_SHA224:
+ for (i = 0; i < 8; i++) {
+ asr_sha_write(dd, HASH_DIGEST(i), md->sha256.state[i]);
+ }
+ break;
+ case HASH_SHA1:
+ for (i = 0; i < 5; i++) {
+ asr_sha_write(dd, HASH_DIGEST(i), md->sha1.state[i]);
+ }
+ break;
+ case HASH_MD5:
+ for (i = 0; i < 4; i++) {
+ asr_sha_write(dd, HASH_DIGEST(i), md->md5.state[i]);
+ }
+ break;
+	default:
+		dev_err(dd->dev, "hash restore context: invalid algorithm\n");
+		return -EINVAL;
+ }
+
+ return 0;
+}
+
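+/* Clean the D-cache for the buffer so the DMA engine sees up-to-date data
+ * (ARM32-specific cache maintenance API). */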
+static inline void sha_cache_operation(void *addr, int size)
+{
+ __cpuc_flush_dcache_area(addr, size);
+}
+
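+/*
+ * Feed one run of word-aligned data through the hash engine: restore the
+ * partial digest, DMA the data in, issue HASH_UPDATE, then save the new
+ * partial digest back into the request context.
+ */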
+static int hash_compress_aligned(struct asr_sha_reqctx *ctx, int alg, uint8_t *in, int data_len)
+{
+ int ret = 0;
+ struct asr_bcm_sha *dd = ctx->dd;
+ struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_sha);
+ struct asr_bcm_ops *bcm_ops = dev_dd->bcm_ops;
+
+	/* validate before taking the device so this early-exit path
+	 * does not leak a dev_get() reference */
+	if (((uint32_t)in & 0x3) || (data_len == 0))
+		return -EINVAL;
+
+	bcm_ops->dev_get(dev_dd);
+
+ adec_engine_hw_reset(dev_dd, ACC_ENG_HASH);
+ hash_sw_reset(dd);
+ ret = hash_set_mode(dd, HASH_SIMPLE, alg);
+ if (ret)
+ goto error;
+
+ adec_engine_hw_reset(dev_dd, ACC_ENG_DMA);
+ abus_set_mode(dev_dd, ABUS_GRP_A_HASH, ABUS_GRP_B_AES, ABUS_CROSS, ABUS_STRAIGHT);
+ dma_input_config(dev_dd, 0, 0);
+ ret = hash_restore_context(ctx, alg);
+ if (ret)
+ goto error;
+
+	ret = dma_input_address(dev_dd, (uint32_t)virt_to_phys((void *)in),
+				ROUND_UP_TO_WORD_CNT(data_len), 0);
+ if (ret)
+ goto error;
+
+ sha_cache_operation(in, (ROUND_UP_TO_WORD_CNT(data_len) << 2));
+ dma_input_start(dev_dd);
+ asr_sha_write(dd, HASH_INCOME_SEG_SZ, data_len);
+ ret = hash_config_op(dd, HASH_UPDATE);
+ if (ret) {
+ dma_input_stop(dev_dd);
+ goto error;
+ }
+
+ dma_wait_input_finish(dev_dd);
+ dma_input_stop(dev_dd);
+
+ ret = hash_save_context(ctx, alg);
+ if (ret)
+ goto error;
+
+error:
+ bcm_ops->dev_put(dev_dd);
+ return ret;
+}
+
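+/*
+ * Compress blks * blk_sz bytes. Aligned input goes straight to the engine;
+ * unaligned input is bounced through a kmalloc'd buffer in chunks of at
+ * most HASH_ALIGN_BUF_SIZE bytes.
+ */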
+static int hash_compress(struct asr_sha_reqctx *ctx, int alg, uint8_t *in, int blks, int blk_sz)
+{
+ uint8_t *dma_in = NULL;
+ int data_len = blks * blk_sz;
+ int ret, n;
+ uint8_t *ptr_in;
+
+ if (((uint32_t)in & 0x3) == 0) {
+ dma_in = in;
+ ret = hash_compress_aligned(ctx, alg, dma_in, data_len);
+ return ret;
+ }
+
+	n = min(data_len, HASH_ALIGN_BUF_SIZE);
+	/* kmalloc() memory is at least word-aligned, so it can be handed to
+	 * the DMA engine directly and kfree()d as-is; masking the pointer
+	 * down would break the later kfree() */
+	dma_in = kmalloc(n, GFP_KERNEL);
+	if (!dma_in) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+ ptr_in = in;
+ do {
+ n = min(data_len, HASH_ALIGN_BUF_SIZE);
+ memcpy((void *)dma_in, (void *)ptr_in, n);
+ ret = hash_compress_aligned(ctx, alg, dma_in, n);
+ if (ret) {
+ goto exit;
+ }
+ data_len -= n;
+		ptr_in += n;
+	} while (data_len > 0);
+
+exit:
+	kfree(dma_in);
+ return ret;
+}
+
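+/*
+ * Finalize a hash: push the buffered tail (if any) through DMA, program the
+ * total message size so the engine can apply padding, issue HASH_FINAL and
+ * read the digest out of the result registers.
+ */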
+static int hash_tail_process(struct asr_sha_reqctx *ctx, uint8_t *out, int out_size,
+			     uint64_t total_size, int tail_size, unsigned char *dma_addr, int alg)
+{
+ int ret = 0;
+ int reg_val, i;
+ struct asr_bcm_sha *dd = ctx->dd;
+ struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_sha);
+ struct asr_bcm_ops *bcm_ops = dev_dd->bcm_ops;
+
+ bcm_ops->dev_get(dev_dd);
+
+ adec_engine_hw_reset(dev_dd, ACC_ENG_HASH);
+ hash_sw_reset(dd);
+ ret = hash_set_mode(dd, HASH_SIMPLE, alg);
+ if (ret)
+ goto error;
+
+ adec_engine_hw_reset(dev_dd, ACC_ENG_DMA);
+ abus_set_mode(dev_dd, ABUS_GRP_A_HASH, ABUS_GRP_B_AES, ABUS_CROSS, ABUS_STRAIGHT);
+ dma_input_config(dev_dd, 0, 0);
+ ret = hash_restore_context(ctx, alg);
+ if (ret)
+ goto error;
+
+	ret = dma_input_address(dev_dd, (uint32_t)virt_to_phys((void *)dma_addr),
+				ROUND_UP_TO_WORD_CNT(tail_size), 0);
+ if (ret)
+ goto error;
+
+ if (tail_size) {
+ sha_cache_operation(dma_addr, (ROUND_UP_TO_WORD_CNT(tail_size) << 2));
+ dma_input_start(dev_dd);
+ }
+
+ asr_sha_write(dd, HASH_INCOME_SEG_SZ, tail_size);
+ asr_sha_write(dd, HASH_TOTAL_MSG_SZ_L, (total_size & 0xffffffff));
+ asr_sha_write(dd, HASH_TOTAL_MSG_SZ_H, (total_size >> 32));
+
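+	/* set bit 2 of HASH_CONTROL (assumed pad/final-enable in this IP)
+	 * before issuing HASH_FINAL */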
+ reg_val = asr_sha_read(dd, HASH_CONTROL);
+ reg_val |= (0x1 << 0x2);
+ asr_sha_write(dd, HASH_CONTROL, reg_val);
+
+ ret = hash_config_op(dd, HASH_FINAL);
+ if (ret) {
+ if (tail_size)
+ dma_input_stop(dev_dd);
+ goto error;
+ }
+
+ if (tail_size) {
+ dma_wait_input_finish(dev_dd);
+ dma_input_stop(dev_dd);
+ }
+
+	/* copy the digest out; for SHA-384/512 each 64-bit state word is
+	 * split across HASH_DIGEST (low half) and HASH_DIGEST_H (high half),
+	 * with the high half leading in the output byte stream */
+ if (alg == HASH_SHA384 || alg == HASH_SHA512) {
+ for (i = 0; i < (out_size / 8); i++) {
+ reg_val = asr_sha_read(dd, HASH_DIGEST(i));
+ out[4 + i * 8] = (uint8_t)(reg_val & 0xFF);
+ out[5 + i * 8] = (uint8_t)((reg_val >> 8) & 0xFF);
+ out[6 + i * 8] = (uint8_t)((reg_val >> 16) & 0xFF);
+ out[7 + i * 8] = (uint8_t)((reg_val >> 24) & 0xFF);
+ reg_val = asr_sha_read(dd, HASH_DIGEST_H(i));
+ out[0 + i * 8] = (uint8_t)(reg_val & 0xFF);
+ out[1 + i * 8] = (uint8_t)((reg_val >> 8) & 0xFF);
+ out[2 + i * 8] = (uint8_t)((reg_val >> 16) & 0xFF);
+ out[3 + i * 8] = (uint8_t)((reg_val >> 24) & 0xFF);
+ }
+ } else {
+ for (i = 0; i < (out_size / 4); i++) {
+ reg_val = asr_sha_read(dd, HASH_DIGEST(i));
+ out[0 + i * 4] = (uint8_t)(reg_val & 0xFF);
+ out[1 + i * 4] = (uint8_t)((reg_val >> 8) & 0xFF);
+ out[2 + i * 4] = (uint8_t)((reg_val >> 16) & 0xFF);
+ out[3 + i * 4] = (uint8_t)((reg_val >> 24) & 0xFF);
+ }
+ }
+
+error:
+ bcm_ops->dev_put(dev_dd);
+ return ret;
+}
+
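+/* Reset the engine, select the algorithm and run HASH_INIT (loading the
+ * algorithm's initial state), then snapshot that state into the context. */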
+static int hash_init(struct asr_sha_reqctx *ctx, int alg)
+{
+ int ret;
+ struct asr_bcm_sha *dd = ctx->dd;
+ struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_sha);
+ struct asr_bcm_ops *bcm_ops = dev_dd->bcm_ops;
+
+ bcm_ops->dev_get(dev_dd);
+
+ adec_engine_hw_reset(dev_dd, ACC_ENG_HASH);
+ hash_sw_reset(dd);
+
+ ret = hash_set_mode(dd, HASH_SIMPLE, alg);
+ if (ret)
+ goto error;
+ ret = hash_config_op(dd, HASH_INIT);
+ if (ret)
+ goto error;
+
+ ret = hash_save_context(ctx, alg);
+ if (ret)
+ goto error;
+
+error:
+ bcm_ops->dev_put(dev_dd);
+ return ret;
+}
+
+/* Buffer input and feed only whole block_size chunks to the engine; the
+ * remainder is kept in md->buf until more data or the final call arrives. */
+static int hash_process(struct asr_sha_reqctx *ctx, int alg, uint8_t *in, uint32_t inlen)
+{
+ int err;
+ uint32_t n, blocks;
+ struct hash_state *md = &ctx->md;
+
+	if (md->curlen > sizeof(md->buf))
+		return -EINVAL;
+
+ while (inlen > 0) {
+ if (md->curlen == 0 && inlen >= md->block_size) {
+ blocks = inlen / md->block_size;
+ err = hash_compress(ctx, alg, in, blocks, md->block_size);
+ if (err)
+ return err;
+ md->length += blocks * md->block_size * 8;
+ in += blocks * md->block_size;
+ inlen -= blocks * md->block_size;
+ } else {
+ n = min(inlen, (md->block_size - md->curlen));
+ memcpy(md->buf + md->curlen, in, n);
+ md->curlen += n;
+ in += n;
+ inlen -= n;
+ if (md->curlen == md->block_size) {
+ err = hash_compress(ctx, alg, md->buf, 1, md->block_size);
+ if (err)
+ return err;
+				md->length += 8 * md->block_size;
+ md->curlen = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
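+/* Map the algorithm to its digest length and hand off to the finalization
+ * path together with the buffered tail. */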
+static int hash_done(struct asr_sha_reqctx *ctx, int alg, uint8_t *out)
+{
+ uint32_t out_len;
+ struct hash_state *md = &ctx->md;
+ struct asr_bcm_sha *dd = ctx->dd;
+
+ switch(alg) {
+ case HASH_SHA512:
+ out_len = HASH_LEN_SHA512;
+ break;
+ case HASH_SHA384:
+ out_len = HASH_LEN_SHA384;
+ break;
+ case HASH_SHA256:
+ out_len = HASH_LEN_SHA256;
+ break;
+ case HASH_SHA224:
+ out_len = HASH_LEN_SHA224;
+ break;
+ case HASH_SHA1:
+ out_len = HASH_LEN_SHA1;
+ break;
+ case HASH_MD5:
+ out_len = HASH_LEN_MD5;
+ break;
+	default:
+		dev_err(dd->dev, "err: unsupported hash algorithm\n");
+		return -EINVAL;
+ }
+
+	return hash_tail_process(ctx, out, out_len,
+				 (md->length / 8 + md->curlen), md->curlen, md->buf, alg);
+}
+/* ------- end -------- */
+
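+/* Gather request data from the scatterlist into the context's linear buffer,
+ * advancing sg/offset/total as it goes. */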
+static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
+{
+ size_t count;
+
+ while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
+ count = min(ctx->sg->length - ctx->offset, ctx->total);
+ count = min(count, ctx->buflen - ctx->bufcnt);
+
+ if (count <= 0) {
+			/*
+			 * count can only be 0 here (it is unsigned): either
+			 * the buffer is full or the sg length is 0. In the
+			 * latter case, check if there is another sg in the
+			 * list; a 0-length sg doesn't necessarily mean the
+			 * end of the sg list.
+			 */
+ if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
+ ctx->sg = sg_next(ctx->sg);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+ ctx->offset, count, 0);
+
+ ctx->bufcnt += count;
+ ctx->offset += count;
+ ctx->total -= count;
+
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
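+/*
+ * Enqueue the request (if any) and, unless the engine is already busy,
+ * dequeue the next pending request and kick off ctx->start() for it.
+ */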
+static int asr_sha_handle_queue(struct asr_bcm_sha *dd,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct asr_sha_ctx *ctx;
+ unsigned long flags;
+ bool start_async;
+ int err = 0, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->queue, req);
+
+ if (SHA_FLAGS_BUSY & dd->flags) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (async_req)
+ dd->flags |= SHA_FLAGS_BUSY;
+
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req) {
+ return ret;
+ }
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ ctx = crypto_tfm_ctx(async_req->tfm);
+
+ dd->req = ahash_request_cast(async_req);
+ start_async = (dd->req != req);
+ dd->is_async = start_async;
+ dd->force_complete = false;
+
+ /* WARNING: ctx->start() MAY change dd->is_async. */
+ err = ctx->start(dd);
+ return (start_async) ? ret : err;
+}
+
+static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct asr_bcm_sha *dd = ctx->dd;
+
+ ctx->op = op;
+
+ return asr_sha_handle_queue(dd, req);
+}
+
+static void asr_sha_copy_ready_hash(struct ahash_request *req)
+{
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return;
+
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_MD5:
+ memcpy(req->result, ctx->digest, MD5_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA1:
+ memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA224:
+ memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA256:
+ memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA384:
+ memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
+ break;
+ case SHA_FLAGS_SHA512:
+ memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
+ break;
+ default:
+ return;
+ }
+}
+
+static inline int asr_sha_complete(struct asr_bcm_sha *dd, int err)
+{
+ struct ahash_request *req = dd->req;
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ dd->flags &= ~(SHA_FLAGS_BUSY);
+ ctx->flags &= ~(SHA_FLAGS_FINAL);
+
+ if ((dd->is_async || dd->force_complete) && req->base.complete)
+ req->base.complete(&req->base, err);
+
+ /* handle new request */
+ tasklet_schedule(&dd->queue_task);
+
+ return err;
+}
+
+static int asr_sha_buff_init(struct asr_bcm_sha *dd, uint32_t len)
+{
+ struct ahash_request *req = dd->req;
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
+ if (!ctx->buffer) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ return -ENOMEM;
+ }
+
+ ctx->buflen = PAGE_SIZE << get_order(len);
+
+ return 0;
+}
+
+static void asr_sha_buff_cleanup(struct asr_bcm_sha *dd, uint32_t len)
+{
+ struct ahash_request *req = dd->req;
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ free_pages((unsigned long)ctx->buffer, get_order(len));
+ ctx->buflen = 0;
+}
+
+static int sha_init_req(struct asr_sha_reqctx *ctx)
+{
+ int ret = 0;
+
+ /* hardware: hash init */
+ ret = hash_init(ctx, ctx->md.alg);
+ if (ret)
+ return -EINVAL;
+ return 0;
+}
+
+static int sha_update_req(struct asr_sha_reqctx *ctx)
+{
+ int ret = 0;
+ int bufcnt;
+ uint32_t buflen = ctx->total;
+
+ ret = asr_sha_buff_init(ctx->dd, ctx->total);
+ if (ret)
+ return -ENOMEM;
+
+ asr_sha_append_sg(ctx);
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+	/* hardware: hash process */
+ ret = hash_process(ctx, ctx->md.alg, ctx->buffer, bufcnt);
+ if (ret)
+ ret = -EINVAL;
+
+ asr_sha_buff_cleanup(ctx->dd, buflen);
+ return ret;
+}
+
+static void sha_finish_req(struct asr_sha_reqctx *ctx, int *err)
+{
+ uint8_t *hash = (uint8_t *)ctx->digest;
+
+ if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
+		*err = hash_done(ctx, ctx->md.alg, hash);
+ asr_sha_copy_ready_hash(ctx->dd->req);
+ ctx->flags &= (~SHA_FLAGS_FINAL);
+ } else {
+ ctx->flags |= SHA_FLAGS_ERROR;
+ }
+}
+
+static void sha_next_req(struct asr_sha_reqctx *ctx, int *err)
+{
+ if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
+ sha_finish_req(ctx, err);
+
+ (void)asr_sha_complete(ctx->dd, *err);
+}
+
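+/* Entry point run by the queue handler for a dequeued request: perform the
+ * one-time INIT, then dispatch the requested op (update/final) and complete
+ * the request unless it is still in progress. */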
+static int asr_sha_start(struct asr_bcm_sha *dd)
+{
+ int err = 0;
+ struct ahash_request *req = dd->req;
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ mutex_lock(&dd->queue_lock);
+
+ if ((ctx->flags & SHA_FLAGS_INIT)) {
+ err = sha_init_req(ctx);
+ ctx->flags &= (~SHA_FLAGS_INIT);
+ if (err) {
+ mutex_unlock(&dd->queue_lock);
+ return err;
+ }
+ }
+
+ if (ctx->op == SHA_OP_UPDATE) {
+ err = sha_update_req(ctx);
+ if (!err && (ctx->flags & SHA_FLAGS_FINUP))
+ /* no final() after finup() */
+ sha_finish_req(ctx, &err);
+ } else if (ctx->op == SHA_OP_FINAL) {
+ sha_finish_req(ctx, &err);
+ }
+
+ if (unlikely(err != -EINPROGRESS)) {
+ /* Task will not finish it, so do it here */
+ sha_next_req(ctx, &err);
+ }
+
+ mutex_unlock(&dd->queue_lock);
+ return err;
+}
+
+static int asr_sha_cra_init(struct crypto_tfm *tfm)
+{
+ struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct asr_sha_reqctx));
+ ctx->start = asr_sha_start;
+
+ return 0;
+}
+
+static void asr_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static inline void asr_sha_get(struct asr_bcm_sha *dd)
+{
+ mutex_lock(&dd->sha_lock);
+}
+
+static inline void asr_sha_put(struct asr_bcm_sha *dd)
+{
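+	/* tolerate unbalanced puts (e.g. finup() putting again after update()
+	 * already did): only unlock if the mutex is actually held */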
+	if (mutex_is_locked(&dd->sha_lock))
+ mutex_unlock(&dd->sha_lock);
+}
+
+static int asr_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct asr_bcm_sha *dd = asr_sha_local;
+
+ asr_sha_get(dd);
+
+ ctx->dd = dd;
+ memset(&ctx->md, 0, sizeof(ctx->md));
+ ctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_MD5;
+ ctx->md.alg = HASH_MD5;
+ ctx->md.block_size = MD5_HMAC_BLOCK_SIZE;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA1;
+ ctx->md.alg = HASH_SHA1;
+ ctx->md.block_size = SHA1_BLOCK_SIZE;
+ break;
+ case SHA224_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA224;
+ ctx->md.alg = HASH_SHA224;
+ ctx->md.block_size = SHA224_BLOCK_SIZE;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA256;
+ ctx->md.alg = HASH_SHA256;
+ ctx->md.block_size = SHA256_BLOCK_SIZE;
+ break;
+ case SHA384_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA384;
+ ctx->md.alg = HASH_SHA384;
+ ctx->md.block_size = SHA384_BLOCK_SIZE;
+ break;
+ case SHA512_DIGEST_SIZE:
+ ctx->flags |= SHA_FLAGS_SHA512;
+ ctx->md.alg = HASH_SHA512;
+ ctx->md.block_size = SHA512_BLOCK_SIZE;
+ break;
+ default:
+ asr_sha_put(dd);
+ return -EINVAL;
+ }
+
+ ctx->bufcnt = 0;
+ ctx->flags |= SHA_FLAGS_INIT;
+
+ asr_sha_put(dd);
+ return 0;
+}
+
+static int asr_sha_update(struct ahash_request *req)
+{
+ int ret = 0;
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ asr_sha_get(ctx->dd);
+
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ ret = asr_sha_enqueue(req, SHA_OP_UPDATE);
+
+ asr_sha_put(ctx->dd);
+ return ret;
+}
+
+static int asr_sha_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ asr_sha_get(ctx->dd);
+
+ ctx->flags |= SHA_FLAGS_FINAL;
+ if (ctx->flags & SHA_FLAGS_ERROR) {
+ asr_sha_put(ctx->dd);
+ return 0; /* uncompleted hash is not needed */
+ }
+ ret = asr_sha_enqueue(req, SHA_OP_FINAL);
+
+ asr_sha_put(ctx->dd);
+ return ret;
+}
+
+static int asr_sha_finup(struct ahash_request *req)
+{
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->flags |= SHA_FLAGS_FINUP;
+
+ err1 = asr_sha_update(req);
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))) {
+ asr_sha_put(ctx->dd);
+ return err1;
+ }
+ /*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except on -EINPROGRESS
+ */
+ err2 = asr_sha_final(req);
+
+ return err1 ?: err2;
+}
+
+static int asr_sha_digest(struct ahash_request *req)
+{
+ return asr_sha_init(req) ?: asr_sha_finup(req);
+}
+
+static int asr_sha_export(struct ahash_request *req, void *out)
+{
+ const struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int asr_sha_import(struct ahash_request *req, const void *in)
+{
+ struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
+static struct ahash_alg sha_algs[] = {
+ /* md5 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_sha_reqctx),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "asr-md5",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha1 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_sha_reqctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "asr-sha1",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha224 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_sha_reqctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "asr-sha224",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha256 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_sha_reqctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "asr-sha256",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha384 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_sha_reqctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "asr-sha384",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+
+ /* sha512 */
+ {
+ .init = asr_sha_init,
+ .update = asr_sha_update,
+ .final = asr_sha_final,
+ .finup = asr_sha_finup,
+ .digest = asr_sha_digest,
+ .export = asr_sha_export,
+ .import = asr_sha_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct asr_sha_reqctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "asr-sha512",
+ .cra_priority = ASR_SHA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = asr_sha_cra_init,
+ .cra_exit = asr_sha_cra_exit,
+ }
+ }
+ },
+};
+
+static void asr_sha_queue_task(unsigned long data)
+{
+ struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;
+
+ asr_sha_handle_queue(dd, NULL);
+}
+
+#ifdef ASR_BCM_SHA_TEST
+ static int bcm_sha_test(struct asr_bcm_sha *dd);
+#endif
+
+int asr_bcm_sha_register(struct asr_bcm_dev *bcm_dd)
+{
+ int err, i, j;
+ struct asr_bcm_sha *sha_dd;
+
+ sha_dd = &bcm_dd->asr_sha;
+
+ sha_dd->dev = bcm_dd->dev;
+ sha_dd->io_base = bcm_dd->io_base;
+ sha_dd->phys_base = bcm_dd->phys_base;
+
+ asr_sha_local = sha_dd;
+
+ spin_lock_init(&sha_dd->lock);
+ mutex_init(&sha_dd->sha_lock);
+ mutex_init(&sha_dd->queue_lock);
+ tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
+ (unsigned long)sha_dd);
+ crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ err = crypto_register_ahash(&sha_algs[i]);
+ if (err)
+ goto err_sha_algs;
+ }
+
+#ifdef ASR_BCM_SHA_TEST
+ bcm_sha_test(sha_dd);
+#endif
+
+ return 0;
+
+err_sha_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&sha_algs[j]);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_sha_register);
+
+int asr_bcm_sha_unregister(struct asr_bcm_dev *bcm_dd)
+{
+ int i;
+ struct asr_bcm_sha *sha_dd = &bcm_dd->asr_sha;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
+ crypto_unregister_ahash(&sha_algs[i]);
+
+ tasklet_kill(&sha_dd->queue_task);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_sha_unregister);
+
+#ifdef ASR_BCM_SHA_TEST
+
+static int bcm_sha_test(struct asr_bcm_sha *dd)
+{
+ int ret = 0;
+
+ const struct {
+ const char *msg;
+ uint8_t hash[20];
+ } sha1_tests[] = {
+ {
+ "abc",
+ { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06,
+ 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
+ 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
+ 0xd8, 0x9d
+ }
+ },
+ {
+ "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
+ "jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
+ "nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
+ "gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
+ "jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
+ "oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
+ "djkisijdknknkskdnknflnnesniewinoinknmdn"
+ "kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
+ "lskldklklklnmlflmlmlfmlfml",
+ {
+ 0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
+ 0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
+ 0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
+ 0x13, 0x91
+ }
+ }
+ };
+
+ struct asr_sha_reqctx ctx1;
+ struct asr_sha_reqctx ctx2;
+
+ unsigned char out_sha1_1[20] = {0};
+ unsigned char out_sha1_2[20] = {0};
+
+ memset(&ctx1.md, 0, sizeof(ctx1.md));
+ ctx1.md.block_size = BLOCK_ALGIN_SIZE;
+ ctx1.dd = dd;
+
+ memset(&ctx2.md, 0, sizeof(ctx2.md));
+ ctx2.md.block_size = BLOCK_ALGIN_SIZE;
+ ctx2.dd = dd;
+
+ ret = hash_init(&ctx1, HASH_SHA1);
+ if (ret) {
+ return ret;
+ }
+ ret = hash_init(&ctx2, HASH_SHA1);
+ if (ret) {
+ return ret;
+ }
+ ret = hash_process(&ctx1, HASH_SHA1, (uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
+ if (ret) {
+ return ret;
+ }
+ ret = hash_done(&ctx1, HASH_SHA1, out_sha1_1);
+ if (ret) {
+ return ret;
+ }
+ ret = hash_process(&ctx2, HASH_SHA1, (uint8_t *)sha1_tests[1].msg, strlen(sha1_tests[1].msg));
+ if (ret) {
+ return ret;
+ }
+ ret = hash_done(&ctx2, HASH_SHA1, out_sha1_2);
+ if (ret) {
+ return ret;
+ }
+
+	if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1)))
+		pr_err("sha1 test 0 failed\n");
+	else
+		pr_info("sha1 test 0 pass\n");
+	if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2)))
+		pr_err("sha1 test 1 failed\n");
+	else
+		pr_info("sha1 test 1 pass\n");
+
+ return 0;
+}
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
+MODULE_DESCRIPTION("ASR bcm sha driver");