/* blob: 408ea9a2c21ebcc5044177dffcf97bf3024a33e6 — git web-viewer artifact, not part of the source */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/cputype.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include "asr-geu.h"
static struct asr_geu_aes *asr_aes_local = NULL;
/* Read the 32-bit GEU register at byte @offset from the device base. */
static inline u32 asr_aes_read(struct asr_geu_aes *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}
/* Write @value to the 32-bit GEU register at byte @offset. */
static inline void asr_aes_write(struct asr_geu_aes *dd,
u32 offset, u32 value)
{
writel_relaxed(value, dd->io_base + offset);
}
/*
 * Read-modify-write helper: clear the @mask bits of the register at
 * @offset, then OR in @value.
 */
static inline void asr_aes_write_mask(struct asr_geu_aes *dd, u32 offset,
				      u32 value, u32 mask)
{
	u32 cur = asr_aes_read(dd, offset);

	asr_aes_write(dd, offset, (cur & ~mask) | value);
}
/* Read @count consecutive 32-bit registers starting at @offset into @value. */
static void asr_aes_read_n(struct asr_geu_aes *dd, u32 offset,
			   u32 *value, int count)
{
	int i;

	for (i = 0; i < count; i++)
		value[i] = asr_aes_read(dd, offset + 4 * i);
}
/* Write @count consecutive 32-bit words from @value starting at @offset. */
static void asr_aes_write_n(struct asr_geu_aes *dd, u32 offset,
			    const u32 *value, int count)
{
	int i;

	for (i = 0; i < count; i++)
		asr_aes_write(dd, offset + 4 * i, value[i]);
}
/* Read one 16-byte AES block (4 words) from the registers at @offset. */
static inline void asr_aes_read_block(struct asr_geu_aes *dd, u32 offset,
u32 *value)
{
asr_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
/* Write one 16-byte AES block (4 words) to the registers at @offset. */
static inline void asr_aes_write_block(struct asr_geu_aes *dd, u32 offset,
const u32 *value)
{
asr_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
/*
 * Return nonzero when the hardware root key (RKEK) has been fused into
 * the part.  The fuse layout differs per SoC generation, selected at
 * build time.
 */
static int asr_aes_rkek_fused(struct asr_geu_aes *dd)
{
u32 val;
/* If RKEK is burned, SW access to it must be disabled as well */
#if defined(CONFIG_CPU_ASR1901)
/* 1901, 1906 */
/* check if LCS_DM is burned */
val = asr_aes_read(dd, GEU_KSTR_BANK6_LCS);
val >>= GEU_KSTR_LCS_DM_BASE;
val &= GEU_KSTR_LCS_MASK;
/* NOTE(review): >1 bits set in the LCS field is treated as "burned" — confirm against fuse map */
if (hweight32(val) > 1)
return 1;
#else
/* 1803, 1806, 1828, 1903 */
/* check if secure key access disable is burned */
val = asr_aes_read(dd, GEU_FUSE_VAL_APCFG2);
if (val & GEU_SECURE_KEY_ACCESS_DISABLED)
return 1;
#endif
return 0;
}
/*
 * Poll GEU_STATUS until the hardware finishes expanding the round keys,
 * waiting 1us between polls (~10ms total budget).
 * Returns 0 on success, 1 on timeout.
 */
static int asr_aes_wait_for_key_ready(struct asr_geu_aes *dd)
{
	unsigned int remaining;

	for (remaining = 10000; remaining; remaining--) {
		if (asr_aes_read(dd, GEU_STATUS) & GEU_STATUS_ROUND_KEY_READY)
			return 0;
		udelay(1);
	}

	return 1;
}
/*
 * Poll GEU_STATUS until the output data block is ready.
 * Returns 0 on success, 1 on timeout.
 *
 * Fix: the original loop busy-spun with no delay at all, so its 10000
 * iterations could elapse in a few microseconds on a fast core and
 * report a spurious timeout.  Delay 1us per poll, mirroring
 * asr_aes_wait_for_key_ready().
 */
static inline int asr_aes_wait_for_data_ready(struct asr_geu_aes *dd)
{
	u32 status;
	int timeout = 10000;

	while (timeout) {
		status = asr_aes_read(dd, GEU_STATUS);
		if (status & GEU_STATUS_DATAO_READY)
			return 0;
		udelay(1);
		timeout--;
	}
	return 1;
}
/*
 * Number of pad bytes needed to round @len up to a multiple of
 * @block_size (which must be a power of two).  Returns 0 when @len is
 * already aligned.
 */
static inline size_t asr_aes_padlen(size_t len, size_t block_size)
{
	size_t rem = len & (block_size - 1);

	if (!rem)
		return 0;
	return block_size - rem;
}
/*
 * GEU interrupt dispatch for AES: on DATAO_READY, defer processing to
 * the done tasklet when a request is in flight.  Returns IRQ_HANDLED
 * for AES data interrupts, IRQ_NONE for anything else.
 */
static irqreturn_t asr_aes_irq(u32 status, void *dev_id)
{
	struct asr_geu_aes *aes_dd = dev_id;

	if (!(status & GEU_STATUS_DATAO_READY))
		return IRQ_NONE;

	if (aes_dd->flags & AES_FLAGS_BUSY)
		tasklet_schedule(&aes_dd->done_task);
	else
		dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");

	return IRQ_HANDLED;
}
/* Reset the AES engine: clear the config and status registers.  Always 0. */
static int asr_aes_hw_init(struct asr_geu_aes *dd)
{
asr_aes_write(dd, GEU_CONFIG, 0);
asr_aes_write(dd, GEU_STATUS, 0);
return 0;
}
/* Load the request's mode flags into dd->flags, keeping persistent bits. */
static inline void asr_aes_set_mode(struct asr_geu_aes *dd,
const struct asr_aes_reqctx *rctx)
{
/* Clear all but persistent flags and set request flags. */
dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}
/* True when the current request is an encryption. */
static inline bool asr_aes_is_encrypt(const struct asr_geu_aes *dd)
{
	return (dd->flags & AES_FLAGS_ENCRYPT) != 0;
}
/*
 * After a request completes, store the last ciphertext block in
 * req->iv so chained CBC requests can continue the stream.  For
 * decryption of an in-place request the ciphertext has already been
 * overwritten, so the copy saved in rctx->lastc by asr_aes_crypt()
 * is used instead.
 */
static void asr_aes_set_iv_as_last_ciphertext_block(struct asr_geu_aes *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct asr_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	unsigned int tail;

	if (req->cryptlen < ivsize)
		return;

	tail = req->cryptlen - ivsize;

	if (rctx->mode & AES_FLAGS_ENCRYPT) {
		/* Ciphertext lives in dst after encryption. */
		scatterwalk_map_and_copy(req->iv, req->dst, tail, ivsize, 0);
		return;
	}

	if (req->src == req->dst)
		memcpy(req->iv, rctx->lastc, ivsize);
	else
		scatterwalk_map_and_copy(req->iv, req->src, tail, ivsize, 0);
}
/*
 * Finish the current request: drop BUSY, propagate the chaining IV,
 * mask further data interrupts (interrupt mode only), release the GEU
 * core reference, complete the crypto request and kick the queue
 * tasklet so the next request is dispatched.  Returns @err unchanged
 * for the caller to propagate.
 */
static inline int asr_aes_complete(struct asr_geu_aes *dd, int err)
{
struct asr_geu_dev *geu_dd = dev_get_drvdata(dd->dev);
struct asr_geu_ops *geu_ops = geu_dd->geu_ops;
dd->flags &= ~AES_FLAGS_BUSY;
asr_aes_set_iv_as_last_ciphertext_block(dd);
if (dd->int_mode)
asr_aes_write_mask(dd, GEU_STATUS, 0, GEU_STATUS_DATA_ENCDEC_ENA);
geu_ops->dev_put(geu_dd);
/* is_async is true when completion happens off the submitter's call path */
if (dd->is_async)
dd->areq->complete(dd->areq, err);
tasklet_schedule(&dd->queue_task);
return err;
}
/* Set the DMA-mode enable bit in GEU_CONFIG (read-modify-write). */
static void asr_aes_enable_dma(struct asr_geu_aes *dd)
{
	asr_aes_write_mask(dd, GEU_CONFIG, GEU_CFG_DMA_MODE_EN, 0);
}
/*
 * Program the engine for the current request: direction, ECB/CBC,
 * key size, key source (fused RKEK or the caller-supplied @key),
 * then trigger round-key expansion and, for CBC, load the IV.
 * Returns 0 on success, -1 if key expansion times out.
 */
static int asr_aes_write_ctrl_key(struct asr_geu_aes *dd, bool use_rkek,
const u32 *iv, const u32 *key, int keylen)
{
u32 config, flags;
int ret = 0;
config = asr_aes_read(dd, GEU_CONFIG);
config &= ~GEU_CFG_KEYSIZE_MASK;
config |= GEU_CFG_PWR_BYP;
config |= GEU_CFG_DATARSR;
config |= GEU_CFG_OCB_BYP;
flags = dd->flags & AES_FLAGS_MODE_MASK;
/* ENC_DEC bit cleared = encrypt, set = decrypt */
if (flags & AES_FLAGS_ENCRYPT)
config &= ~GEU_CFG_ENC_DEC;
else
config |= GEU_CFG_ENC_DEC;
if ((dd->flags & AES_FLAGS_OPMODE_MASK) == AES_FLAGS_CBC)
config |= GEU_CFG_CBC_ECB;
else /* ECB */
config &= ~GEU_CFG_CBC_ECB;
if (keylen == AES_KEYSIZE_128)
config |= GEU_CFG_KEYSIZE_128;
else if (keylen == AES_KEYSIZE_192)
config |= GEU_CFG_KEYSIZE_192;
else
config |= GEU_CFG_KEYSIZE_256;
if (use_rkek) {
config |= GEU_CFG_ENA_RKEK;
} else {
/* Software key: load it into the key registers */
config &= ~GEU_CFG_ENA_RKEK;
asr_aes_write_n(dd, GEU_INIT_KEY(0), key, SIZE_IN_WORDS(keylen));
}
asr_aes_write(dd, GEU_CONFIG, config);
/* Kick round-key expansion and wait for it to finish */
asr_aes_write_mask(dd, GEU_STATUS, GEU_STATUS_ROUND_KEY_START, 0);
ret = asr_aes_wait_for_key_ready(dd);
if (ret)
return -1;
if (iv && (flags & AES_FLAGS_OPMODE_MASK) == AES_FLAGS_CBC) {
asr_aes_write_block(dd, GEU_INIT_IV(0), iv);
asr_aes_write_mask(dd, GEU_CONFIG, GEU_CFG_WRITE_IV, 0);
}
return 0;
}
/* Convenience wrapper: program the engine with the current context key. */
static inline int asr_aes_write_ctrl(struct asr_geu_aes *dd, bool use_rkek,
const u32 *iv)
{
return asr_aes_write_ctrl_key(dd, use_rkek, iv,
dd->ctx->key, dd->ctx->keylen);
}
/* CPU transfer */
/*
 * PIO pump, entered once the first block has been fed to the engine by
 * asr_aes_cpu_start() (or resumed from the done tasklet in interrupt
 * mode).  Each iteration drains one finished output block from the
 * engine, then feeds the next input block if one remains.  In
 * interrupt mode the function returns -EINPROGRESS and is re-entered
 * via dd->resume when the next DATAO_READY interrupt fires.  When the
 * whole buffer is processed the result is copied back to the real
 * destination scatterlist and control passes to cpu_transfer_complete.
 */
static int asr_aes_cpu_transfer(struct asr_geu_aes *dd)
{
int err = 0;
u32 status;
for (;;) {
/* Drain the block the engine just finished */
asr_aes_read_block(dd, GEU_OUT_DATA(0), dd->data);
dd->data += 4;
dd->datalen -= AES_BLOCK_SIZE;
if (dd->datalen < AES_BLOCK_SIZE)
break;
/* Feed the next block and start the engine */
asr_aes_write_block(dd, GEU_IN_DATA(0), dd->data);
asr_aes_write_mask(dd, GEU_STATUS, GEU_STATUS_DATA_ENCDEC_ENA, 0);
if (dd->int_mode) {
/* If the engine already finished, keep pumping inline */
status = asr_aes_read(dd, GEU_STATUS);
if (unlikely(status & GEU_STATUS_DATAO_READY)) {
continue;
}
return -EINPROGRESS;
} else {
err = asr_aes_wait_for_data_ready(dd);
if (err)
break;
}
}
if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
dd->buf, dd->total))
err = -EINVAL;
if (err)
return asr_aes_complete(dd, err);
return dd->cpu_transfer_complete(dd);
}
/*
 * Start a PIO (non-DMA) transfer: stage @src into the bounce buffer,
 * feed the first block to the engine and hand over to
 * asr_aes_cpu_transfer() — either inline (polling mode) or via the
 * DATAO_READY interrupt (-EINPROGRESS).  @resume is invoked after the
 * final block has been copied out to @dst.
 */
static int asr_aes_cpu_start(struct asr_geu_aes *dd,
struct scatterlist *src,
struct scatterlist *dst,
size_t len,
asr_aes_fn_t resume)
{
int ret;
u32 status;
size_t padlen = asr_aes_padlen(len, AES_BLOCK_SIZE);
if (unlikely(len == 0))
return -EINVAL;
sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
dd->total = len;
dd->real_dst = dst;
dd->cpu_transfer_complete = resume;
dd->resume = asr_aes_cpu_transfer;
/* datalen is padded to a whole number of AES blocks */
dd->datalen = len + padlen;
dd->data = (u32 *)dd->buf;
/* Feed the first block */
asr_aes_write_block(dd, GEU_IN_DATA(0), dd->data);
if (dd->int_mode) {
/* Unmask the data interrupt, then start the engine */
asr_aes_write_mask(dd, GEU_CONFIG, GEU_CFG_DATA_IMR, 0);
asr_aes_write_mask(dd, GEU_STATUS, GEU_STATUS_DATA_ENCDEC_ENA, 0);
status = asr_aes_read(dd, GEU_STATUS);
if (unlikely(status & GEU_STATUS_DATAO_READY))
return asr_aes_cpu_transfer(dd);
return -EINPROGRESS;
}
asr_aes_write_mask(dd, GEU_STATUS, GEU_STATUS_DATA_ENCDEC_ENA, 0);
ret = asr_aes_wait_for_data_ready(dd);
if (ret)
return -EFAULT;
return asr_aes_cpu_transfer(dd);
}
/* DMA transfer */
static void asr_aes_dma_callback(void *data);
/*
 * Check whether the first @len bytes of @sg are usable for DMA:
 * word-aligned offsets and block-aligned segment lengths.  On success,
 * fills @dma with the entry count, truncates the last entry's length
 * to end exactly at @len and records the clipped tail in
 * dma->remainder (undone later by asr_aes_restore_sg()).
 */
static bool asr_aes_check_aligned(struct asr_geu_aes *dd,
struct scatterlist *sg,
size_t len,
struct asr_aes_dma *dma)
{
int nents;
if (!IS_ALIGNED(len, dd->ctx->block_size))
return false;
for (nents = 0; sg; sg = sg_next(sg), ++nents) {
if (!IS_ALIGNED(sg->offset, sizeof(u32)))
return false;
if (len <= sg->length) {
/* Last entry needed: clip it to the transfer length */
if (!IS_ALIGNED(len, dd->ctx->block_size))
return false;
dma->nents = nents+1;
dma->remainder = sg->length - len;
sg->length = len;
return true;
}
/* Intermediate entries must be whole blocks */
if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
return false;
len -= sg->length;
}
return false;
}
/*
 * Undo the sg->length truncation done by asr_aes_check_aligned():
 * add the clipped remainder back to the last entry of the transfer.
 */
static inline void asr_aes_restore_sg(const struct asr_aes_dma *dma)
{
	struct scatterlist *sg;
	int n;

	if (!dma->remainder)
		return;

	sg = dma->sg;
	for (n = dma->nents - 1; n > 0 && sg; n--)
		sg = sg_next(sg);

	if (sg)
		sg->length += dma->remainder;
}
/*
 * Prepare src/dst scatterlists for DMA.  Unaligned lists are bounced
 * through dd->buf via a single-entry scatterlist (dd->aligned_sg);
 * aligned lists are mapped directly.  In-place requests are mapped
 * once as BIDIRECTIONAL.  Returns 0, -ENOMEM if the bounce buffer is
 * too small, or -EFAULT if dma_map_sg() fails.
 */
static int asr_aes_dma_map(struct asr_geu_aes *dd,
struct scatterlist *src,
struct scatterlist *dst,
size_t len)
{
bool src_aligned, dst_aligned;
size_t padlen;
dd->total = len;
dd->src.sg = src;
dd->dst.sg = dst;
dd->real_dst = dst;
src_aligned = asr_aes_check_aligned(dd, src, len, &dd->src);
if (src == dst)
dst_aligned = src_aligned;
else
dst_aligned = asr_aes_check_aligned(dd, dst, len, &dd->dst);
if (!src_aligned || !dst_aligned) {
padlen = asr_aes_padlen(len, dd->ctx->block_size);
if (dd->buflen < len + padlen)
return -ENOMEM;
if (!src_aligned) {
/* Stage the source into the bounce buffer */
sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
dd->src.sg = &dd->aligned_sg;
dd->src.nents = 1;
dd->src.remainder = 0;
}
if (!dst_aligned) {
/* Output lands in the bounce buffer; copied back on unmap */
dd->dst.sg = &dd->aligned_sg;
dd->dst.nents = 1;
dd->dst.remainder = 0;
}
sg_init_table(&dd->aligned_sg, 1);
sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
}
if (dd->src.sg == dd->dst.sg) {
dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
DMA_BIDIRECTIONAL);
dd->dst.sg_len = dd->src.sg_len;
if (!dd->src.sg_len)
return -EFAULT;
} else {
dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
DMA_TO_DEVICE);
if (!dd->src.sg_len)
return -EFAULT;
dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
DMA_FROM_DEVICE);
if (!dd->dst.sg_len) {
dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
DMA_TO_DEVICE);
return -EFAULT;
}
}
return 0;
}
/*
 * Undo asr_aes_dma_map(): unmap the scatterlists, restore any clipped
 * sg lengths, and if the destination was bounced through dd->buf copy
 * the result back to the caller's real destination list.
 */
static void asr_aes_dma_unmap(struct asr_geu_aes *dd)
{
if (dd->src.sg == dd->dst.sg) {
dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
DMA_BIDIRECTIONAL);
if (dd->src.sg != &dd->aligned_sg)
asr_aes_restore_sg(&dd->src);
} else {
dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
DMA_FROM_DEVICE);
if (dd->dst.sg != &dd->aligned_sg)
asr_aes_restore_sg(&dd->dst);
dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
DMA_TO_DEVICE);
if (dd->src.sg != &dd->aligned_sg)
asr_aes_restore_sg(&dd->src);
}
if (dd->dst.sg == &dd->aligned_sg)
sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
dd->buf, dd->total);
}
/*
 * Configure and submit one slave-DMA transfer in direction @dir.
 * MEM_TO_DEV feeds the engine's input FIFO; DEV_TO_MEM drains the
 * output FIFO and carries the completion callback (output finishing
 * implies the whole request is done).  Returns 0 or a negative errno.
 */
static int asr_aes_dma_transfer_start(struct asr_geu_aes *dd,
enum dma_slave_buswidth addr_width,
enum dma_transfer_direction dir,
u32 maxburst)
{
struct dma_async_tx_descriptor *desc;
struct dma_slave_config config;
dma_async_tx_callback callback;
struct asr_aes_dma *dma;
int err;
memset(&config, 0, sizeof(config));
config.direction = dir;
config.src_addr_width = addr_width;
config.dst_addr_width = addr_width;
config.src_maxburst = maxburst;
config.dst_maxburst = maxburst;
switch (dir) {
case DMA_MEM_TO_DEV:
dma = &dd->src;
callback = NULL;
config.dst_addr = dd->phys_base + GEU_IN_DATA(0);
break;
case DMA_DEV_TO_MEM:
dma = &dd->dst;
/* Only the output side signals completion */
callback = asr_aes_dma_callback;
config.src_addr = dd->phys_base + GEU_OUT_DATA(0);
break;
default:
return -EINVAL;
}
err = dmaengine_slave_config(dma->chan, &config);
if (err)
return err;
desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -ENOMEM;
desc->callback = callback;
desc->callback_param = dd;
dmaengine_submit(desc);
dma_async_issue_pending(dma->chan);
return 0;
}
/* Abort any in-flight DMA on the channel for direction @dir. */
static void asr_aes_dma_transfer_stop(struct asr_geu_aes *dd,
				      enum dma_transfer_direction dir)
{
	struct asr_aes_dma *dma;

	if (dir == DMA_MEM_TO_DEV)
		dma = &dd->src;
	else if (dir == DMA_DEV_TO_MEM)
		dma = &dd->dst;
	else
		return;

	dmaengine_terminate_all(dma->chan);
}
/*
 * Start a DMA transfer for the request: map the scatterlists, submit
 * both slave channels, then switch the engine into DMA mode.  Returns
 * -EINPROGRESS on success (completion arrives via
 * asr_aes_dma_callback); on any failure the request is completed with
 * the error via asr_aes_complete().
 */
static int asr_aes_dma_start(struct asr_geu_aes *dd,
struct scatterlist *src,
struct scatterlist *dst,
size_t len,
asr_aes_fn_t resume)
{
enum dma_slave_buswidth addr_width;
u32 maxburst;
int err;
if (dd->ctx->block_size == AES_BLOCK_SIZE) {
addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
maxburst = 16;
} else {
err = -EINVAL;
goto exit;
}
err = asr_aes_dma_map(dd, src, dst, len);
if (err)
goto exit;
dd->resume = resume;
/* Submit the memory-to-device (engine input) transfer first */
err = asr_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
maxburst);
if (err)
goto unmap;
/* Then the device-to-memory (engine output) transfer */
err = asr_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
maxburst);
if (err)
goto output_transfer_stop;
asr_aes_enable_dma(dd);
return -EINPROGRESS;
output_transfer_stop:
asr_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
unmap:
asr_aes_dma_unmap(dd);
exit:
return asr_aes_complete(dd, err);
}
/* Terminate both DMA channels and unmap/restore the scatterlists. */
static void asr_aes_dma_stop(struct asr_geu_aes *dd)
{
asr_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
asr_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
asr_aes_dma_unmap(dd);
}
/*
 * Completion callback of the DEV_TO_MEM descriptor: tear down the DMA
 * setup and resume request processing in asynchronous context.
 */
static void asr_aes_dma_callback(void *data)
{
struct asr_geu_aes *dd = data;
asr_aes_dma_stop(dd);
dd->is_async = true;
(void)dd->resume(dd);
}
/*
 * Enqueue @new_areq (may be NULL when called from the queue tasklet)
 * and, if the engine is idle, dequeue and start the next request.
 * Only queue manipulation happens under dd->lock; the request itself
 * runs outside it with AES_FLAGS_BUSY set.  Returns the enqueue status
 * when the started request is not @new_areq, otherwise the start
 * result.
 */
static int asr_aes_handle_queue(struct asr_geu_aes *dd,
struct crypto_async_request *new_areq)
{
struct crypto_async_request *areq, *backlog;
struct asr_aes_ctx *ctx;
unsigned long flags;
bool start_async;
int err, ret = 0;
spin_lock_irqsave(&dd->lock, flags);
if (new_areq)
ret = crypto_enqueue_request(&dd->queue, new_areq);
if (dd->flags & AES_FLAGS_BUSY) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
areq = crypto_dequeue_request(&dd->queue);
if (areq) {
dd->flags |= AES_FLAGS_BUSY;
}
spin_unlock_irqrestore(&dd->lock, flags);
if (!areq)
return ret;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(areq->tfm);
dd->areq = areq;
dd->ctx = ctx;
/* Completion is asynchronous unless we are starting the caller's own request */
start_async = (areq != new_areq);
dd->is_async = start_async;
/* WARNING: ctx->start() MAY change dd->is_async. */
err = ctx->start(dd);
return (start_async) ? ret : err;
}
/* AES async block ciphers */
/* Terminal resume step: complete the current request with success. */
static int asr_aes_transfer_complete(struct asr_geu_aes *dd)
{
return asr_aes_complete(dd, 0);
}
/*
 * Start processing the current skcipher request on the engine: take a
 * GEU core reference, program mode/key/IV, then dispatch to the DMA
 * path (large requests) or the CPU/PIO path.  Runs with
 * AES_FLAGS_BUSY set; every failure exit must go through
 * asr_aes_complete() so the reference and BUSY flag are released.
 *
 * Fix: the return value of asr_aes_write_ctrl() was ignored, so a
 * round-key-expansion timeout silently started a transfer with an
 * unprogrammed key.  Fail the request instead.
 */
static int asr_aes_start(struct asr_geu_aes *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct asr_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct asr_geu_dev *geu_dd = dev_get_drvdata(dd->dev);
	struct asr_geu_ops *geu_ops = geu_dd->geu_ops;
	int err;
	bool use_dma = (req->cryptlen >= ASR_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);

	geu_ops->dev_get(geu_dd);
	asr_aes_set_mode(dd, rctx);

	err = asr_aes_hw_init(dd);
	if (err)
		return asr_aes_complete(dd, err);

	err = asr_aes_write_ctrl(dd, rctx->use_rkek, (u32 *)req->iv);
	if (err)
		return asr_aes_complete(dd, -ETIMEDOUT);

	if (use_dma)
		return asr_aes_dma_start(dd, req->src, req->dst, req->cryptlen,
					 asr_aes_transfer_complete);

	return asr_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
				 asr_aes_transfer_complete);
}
/*
 * Common entry for all four mode wrappers: fill the request context
 * and hand the request to the queue.  For in-place CBC decryption the
 * last ciphertext block is saved in rctx->lastc now, because the
 * engine will overwrite it before the chaining IV is extracted in
 * asr_aes_set_iv_as_last_ciphertext_block().
 */
static int asr_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);
struct asr_aes_reqctx *rctx;
struct asr_geu_aes *dd = asr_aes_local;
ctx->block_size = AES_BLOCK_SIZE;
ctx->dd = dd;
rctx = skcipher_request_ctx(req);
rctx->mode = mode;
rctx->use_rkek = ctx->use_rkek;
if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
unsigned int ivsize = crypto_skcipher_ivsize(cipher);
if (req->cryptlen >= ivsize) {
scatterwalk_map_and_copy(rctx->lastc, req->src,
req->cryptlen - ivsize,
ivsize, 0);
}
}
return asr_aes_handle_queue(dd, &req->base);
}
/*
 * skcipher .setkey for the software-key algorithms: accept only
 * 128/192/256-bit keys and stash the key in the tfm context.
 */
static int asr_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);

	ctx->use_rkek = false;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
/*
 * skcipher .setkey for the RKEK-backed algorithms.  The key material
 * itself lives in fuses, so @key is ignored; only a valid key length
 * is recorded and the context key buffer is cleared.  Fails with
 * -EPERM when the RKEK has not been fused.
 */
static int asr_aes_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct asr_geu_aes *dd = asr_aes_local;

	(void)key;

	if (!dd->rkek_burned)
		return -EPERM;

	ctx->use_rkek = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memset(ctx->key, 0, AES_KEYSIZE_256);
	ctx->keylen = keylen;

	return 0;
}
/* skcipher .encrypt for "ecb(aes)" / "ecb(aes-hwkey)". */
static int asr_aes_ecb_encrypt(struct skcipher_request *req)
{
return asr_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}
/* skcipher .decrypt for "ecb(aes)" / "ecb(aes-hwkey)". */
static int asr_aes_ecb_decrypt(struct skcipher_request *req)
{
return asr_aes_crypt(req, AES_FLAGS_ECB);
}
/* skcipher .encrypt for "cbc(aes)" / "cbc(aes-hwkey)". */
static int asr_aes_cbc_encrypt(struct skcipher_request *req)
{
return asr_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}
/* skcipher .decrypt for "cbc(aes)" / "cbc(aes-hwkey)". */
static int asr_aes_cbc_decrypt(struct skcipher_request *req)
{
return asr_aes_crypt(req, AES_FLAGS_CBC);
}
/* skcipher .init for software-key algorithms: set reqctx size and start op. */
static int asr_aes_init(struct crypto_skcipher *tfm)
{
struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
tfm->reqsize = sizeof(struct asr_aes_reqctx);
ctx->start = asr_aes_start;
return 0;
}
/*
 * skcipher .init for the RKEK-backed algorithms: refuse tfm allocation
 * outright when the hardware key is not fused.
 */
static int asr_aes_hwkey_init(struct crypto_skcipher *tfm)
{
struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
struct asr_geu_aes *dd = asr_aes_local;
if (!dd->rkek_burned)
return -EPERM;
tfm->reqsize = sizeof(struct asr_aes_reqctx);
ctx->start = asr_aes_start;
return 0;
}
/*
 * skcipher .exit: scrub key material from the tfm context before the
 * crypto layer frees it.
 *
 * Fix: use memzero_explicit() so the compiler cannot elide the clear
 * of the soon-to-be-freed secret (a plain memset() of dead memory may
 * be optimized away).
 */
static void asr_aes_exit(struct crypto_skcipher *tfm)
{
	struct asr_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
}
/*
 * Exported skcipher algorithms.  cra_alignmask is 0xf because the
 * engine moves whole 16-byte blocks.
 *
 * NOTE(review): max_keysize is AES_MAX_KEY_SIZE | ASR_AES_HWKEY on the
 * plain (software-key) algorithms as well as the hwkey ones — this
 * widens the keylen range the crypto core accepts before .setkey runs.
 * Confirm this is intentional; asr_aes_setkey() rejects non-standard
 * lengths anyway, so the flag looks like it belongs only on the
 * "-hwkey" entries.
 */
static struct skcipher_alg aes_algs[] = {
/* AES - ECB */
{
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "asr-ecb-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct asr_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
.setkey = asr_aes_setkey,
.encrypt = asr_aes_ecb_encrypt,
.decrypt = asr_aes_ecb_decrypt,
.init = asr_aes_init,
.exit = asr_aes_exit,
},
/* AES - CBC */
{
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "asr-cbc-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct asr_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
.setkey = asr_aes_setkey,
.encrypt = asr_aes_cbc_encrypt,
.decrypt = asr_aes_cbc_decrypt,
.init = asr_aes_init,
.exit = asr_aes_exit,
.ivsize = AES_BLOCK_SIZE,
},
/* AES - ECB, using hardware key, a.k.a. RKEK */
{
.base = {
.cra_name = "ecb(aes-hwkey)",
.cra_driver_name = "asr-ecb-aes-hwkey",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct asr_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
.setkey = asr_aes_set_hwkey,
.encrypt = asr_aes_ecb_encrypt,
.decrypt = asr_aes_ecb_decrypt,
.init = asr_aes_hwkey_init,
.exit = asr_aes_exit,
},
/* AES - CBC, using hardware key, a.k.a. RKEK */
{
.base = {
.cra_name = "cbc(aes-hwkey)",
.cra_driver_name = "asr-cbc-aes-hwkey",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct asr_aes_ctx),
.cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
},
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE | ASR_AES_HWKEY,
.setkey = asr_aes_set_hwkey,
.encrypt = asr_aes_cbc_encrypt,
.decrypt = asr_aes_cbc_decrypt,
.init = asr_aes_hwkey_init,
.exit = asr_aes_exit,
.ivsize = AES_BLOCK_SIZE,
},
};
/*
 * Allocate the bounce buffer used by the CPU path and by unaligned
 * DMA requests.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int asr_aes_buff_init(struct asr_geu_aes *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ASR_AES_BUFFER_ORDER);
	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	/* Round the usable length down to a whole number of AES blocks. */
	dd->buflen = ASR_AES_BUFFER_SIZE & ~(AES_BLOCK_SIZE - 1);

	return 0;
}
/* Free the bounce buffer allocated by asr_aes_buff_init(). */
static void asr_aes_buff_cleanup(struct asr_geu_aes *dd)
{
free_pages((unsigned long)dd->buf, ASR_AES_BUFFER_ORDER);
}
/*
 * Acquire the "tx" (memory-to-device) and "rx" (device-to-memory)
 * slave DMA channels.  Returns 0, or -ENODEV when either channel is
 * unavailable (nothing is left held on failure).
 */
static int asr_aes_dma_init(struct asr_geu_aes *dd)
{
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* Try to grab 2 DMA channels */
dd->src.chan = dma_request_slave_channel_compat(mask, NULL,
NULL, dd->dev, "tx");
if (!dd->src.chan)
goto err_dma_in;
dd->dst.chan = dma_request_slave_channel_compat(mask, NULL,
NULL, dd->dev, "rx");
if (!dd->dst.chan)
goto err_dma_out;
return 0;
err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
return -ENODEV;
}
/* Release both slave DMA channels acquired by asr_aes_dma_init(). */
static void asr_aes_dma_cleanup(struct asr_geu_aes *dd)
{
dma_release_channel(dd->dst.chan);
dma_release_channel(dd->src.chan);
}
/* Tasklet: dispatch the next queued request once the engine is idle. */
static void asr_aes_queue_task(unsigned long data)
{
struct asr_geu_aes *dd = (struct asr_geu_aes *)data;
asr_aes_handle_queue(dd, NULL);
}
/* Tasklet scheduled from the IRQ handler: resume the in-flight transfer. */
static void asr_aes_done_task(unsigned long data)
{
struct asr_geu_aes *dd = (struct asr_geu_aes *)data;
dd->is_async = true;
(void)dd->resume(dd);
}
/*
 * Attach the AES sub-device to the GEU core device: pick interrupt vs
 * polling mode from DT, probe the RKEK fuse state, allocate the
 * bounce buffer and DMA channels, then register the skcipher
 * algorithms.  Returns 0 or a negative errno.
 *
 * Fixes: error paths now release what was acquired earlier — the
 * bounce buffer leaked when DMA channel setup failed, and the buffer,
 * DMA channels and tasklets all leaked when algorithm registration
 * failed.
 */
int asr_geu_aes_register(struct asr_geu_dev *geu_dd)
{
	int err, i, j;
	struct device_node *np = NULL;
	struct asr_geu_aes *aes_dd;

	aes_dd = &geu_dd->asr_aes;
	aes_dd->dev = geu_dd->dev;
	aes_dd->io_base = geu_dd->io_base;
	aes_dd->phys_base = geu_dd->phys_base;

	np = aes_dd->dev->of_node;
	if (of_get_property(np, "asr,aes-int-mode", NULL))
		aes_dd->int_mode = 1;
	else
		aes_dd->int_mode = 0;

	/* The GEU core only routes AES interrupts when a handler is set. */
	if (aes_dd->int_mode)
		aes_dd->aes_irq = asr_aes_irq;
	else
		aes_dd->aes_irq = NULL;

	aes_dd->rkek_burned = asr_aes_rkek_fused(aes_dd);

	err = asr_aes_buff_init(aes_dd);
	if (err)
		return err;

	err = asr_aes_dma_init(aes_dd);
	if (err)
		goto err_buff;

	asr_aes_local = aes_dd;
	spin_lock_init(&aes_dd->lock);
	tasklet_init(&aes_dd->done_task, asr_aes_done_task,
		     (unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, asr_aes_queue_task,
		     (unsigned long)aes_dd);
	crypto_init_queue(&aes_dd->queue, ASR_AES_QUEUE_LENGTH);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err) {
			for (j = 0; j < i; j++)
				crypto_unregister_skcipher(&aes_algs[j]);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	asr_aes_local = NULL;
	asr_aes_dma_cleanup(aes_dd);
err_buff:
	asr_aes_buff_cleanup(aes_dd);
	return err;
}
EXPORT_SYMBOL_GPL(asr_geu_aes_register);
int asr_geu_aes_unregister(struct asr_geu_dev *geu_dd)
{
int i;
struct asr_geu_aes *aes_dd = &geu_dd->asr_aes;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_skcipher(&aes_algs[i]);
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
asr_aes_buff_cleanup(aes_dd);
asr_aes_dma_cleanup(aes_dd);
return 0;
}
EXPORT_SYMBOL_GPL(asr_geu_aes_unregister);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yu Zhang <yuzhang@asrmicro.com>");
MODULE_DESCRIPTION("ASR AES cipher driver");