ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/asr/bcm/asr-cipher.c b/marvell/linux/drivers/crypto/asr/bcm/asr-cipher.c
new file mode 100644
index 0000000..9be5ca4
--- /dev/null
+++ b/marvell/linux/drivers/crypto/asr/bcm/asr-cipher.c
@@ -0,0 +1,1370 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cputype.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <linux/jiffies.h>
+#include <crypto/aes.h>
+#include <crypto/sm4.h>
+#include <crypto/internal/skcipher.h>
+#include "asr-bcm.h"
+#include "asr-cipher.h"
+
+#define CIPHER_BLOCK_SIZE AES_BLOCK_SIZE
+#define CIPHER_MIN_KEY_SIZE AES_MIN_KEY_SIZE
+#define CIPHER_MAX_KEY_SIZE AES_MAX_KEY_SIZE
+
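+/*
+ * Module-local handle used by the skcipher setkey/init callbacks, which have
+ * no direct path back to the platform device data.
+ */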
+static struct asr_bcm_cipher *asr_cipher_local = NULL;
+
+static inline u32 asr_cipher_read(struct asr_bcm_cipher *dd, u32 offset)
+{
+ u32 value = readl_relaxed(dd->io_base + offset);
+
+ return value;
+}
+
+static inline void asr_cipher_write(struct asr_bcm_cipher *dd,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, dd->io_base + offset);
+}
+
+static inline void cipher_cache_operation(void *addr, int size)
+{
+ __cpuc_flush_dcache_area(addr, size);
+}
+
+
+/* hardware handle */
+static void crypto_aes_sw_reset(struct asr_bcm_cipher *dd)
+{
+ uint32_t val;
+
+ val = 0x1;
+ asr_cipher_write(dd, CRYPTO_AES_CONTROL_REG, val);
+ val = 0x0;
+ asr_cipher_write(dd, CRYPTO_AES_CONTROL_REG, val);
+
+ return;
+}
+
+static void crypto_aes_start(struct asr_bcm_cipher *dd)
+{
+ uint32_t val;
+
+ val = 0x1;
+ asr_cipher_write(dd, CRYPTO_AES_COMMAND_REG, val);
+
+ return;
+}
+
+static int crypto_aes_wait(struct asr_bcm_cipher *dd)
+{
+ uint32_t val;
+
+ val = asr_cipher_read(dd, CRYPTO_AES_INTRPT_SRC_REG);
+ asr_cipher_write(dd, CRYPTO_AES_INTRPT_SRC_REG, val);
+
+ return 0;
+}
+
+static int crypto_engine_select(struct asr_bcm_cipher *dd, CRYPTO_ENG_SEL_T engine)
+{
+ uint32_t val;
+
+ val = asr_cipher_read(dd, CRYPTO_ENGINE_SEL_REG);
+ val &= ~0x3;
+
+ switch (engine) {
+ case ENG_AES:
+ val |= 0x1;
+ break;
+ case ENG_DES:
+ val |= 0x2;
+ break;
+ case ENG_RC4:
+ val |= 0x3;
+ break;
+ default:
+ dev_err(dd->dev, "Illegal engine %d\n", engine);
+ return -1;
+ }
+
+ asr_cipher_write(dd, CRYPTO_ENGINE_SEL_REG, val);
+
+ return 0;
+}
+
+static int crypto_aes_set_iv(struct asr_bcm_cipher *dd, const uint8_t *iv)
+{
+ uint32_t val;
+ int reg_index;
+
+ if (iv == NULL)
+ return -1;
+
+ for (reg_index = 0; reg_index < 4; reg_index++) {
+ val = ((iv[(reg_index << 2) +0] & 0xFF) << 0) | \
+ ((iv[(reg_index << 2) + 1] & 0xFF) << 8) | \
+ ((iv[(reg_index << 2) + 2] & 0xFF) << 16) | \
+ ((iv[(reg_index << 2) + 3] & 0xFF) << 24);
+ asr_cipher_write(dd, CRYPTO_IV_REG(reg_index), val);
+ }
+
+ return 0;
+}
+
+static int crypto_aes_get_iv(struct asr_bcm_cipher *dd, uint8_t *iv)
+{
+ uint32_t val;
+ int reg_index;
+
+ if (iv == NULL)
+ return -1;
+
+ for (reg_index = 0; reg_index < 4; reg_index++) {
+ val = asr_cipher_read(dd, CRYPTO_IV_REG(reg_index));
+ iv[(reg_index << 2) +0] = val & 0xFF;
+ iv[(reg_index << 2) +1] = (val >> 8) & 0xFF;
+ iv[(reg_index << 2) +2] = (val >> 16) & 0xFF;
+ iv[(reg_index << 2) +3] = (val >> 24) & 0xFF;
+ }
+
+ return 0;
+}
+
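+/*
+ * Program CRYPTO_AES_CONFIG_REG as done below: bit 0 selects decrypt,
+ * bits [2:1] encode the key length, bits [5:3] select the block mode and
+ * bit 6 switches from the software key registers to the hardware (root/SSK) key.
+ */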
+static int crypto_aes_set_mode(struct asr_bcm_cipher *dd,
+ AES_MODE_T mode, AES_OP_MODE_T op_mode,
+ AES_KEY_LEN_T keylen, bool use_rkey)
+{
+ uint32_t val;
+
+ crypto_engine_select(dd, ENG_AES);
+ val = asr_cipher_read(dd, CRYPTO_AES_CONFIG_REG);
+ val &= ~(0x7 << 0x3);
+
+ switch (mode) {
+ case AES_ECB_ALG:
+ val |= (0x0 << 0x3);
+ break;
+ case AES_CBC_ALG:
+ val |= (0x1 << 0x3);
+ break;
+ case AES_CTR_ALG:
+ val |= (0x2 << 0x3);
+ break;
+ case AES_XTS_ALG:
+ val |= (0x3 << 0x3);
+ break;
+ case AES_KEYWRAP:
+ val |= (0x4 << 0x3);
+ break;
+ default:
+ dev_err(dd->dev, "Illegal aes mode %d\n", mode);
+ return -1;
+ }
+
+ val &= ~(0x3 << 0x1);
+ switch (keylen) {
+ case AES_128:
+ val |= (0x0 << 0x1);
+ break;
+ case AES_192:
+ val |= (0x2 << 0x1);
+ break;
+ case AES_256:
+ val |= (0x1 << 0x1);
+ break;
+ default:
+		dev_err(dd->dev, "Illegal aes keylen %d\n", keylen);
+ return -1;
+ }
+
+ val &= ~(0x1 << 0x0);
+ if (op_mode == AES_DECRYPT_OP) {
+ val |= (0x1 << 0x0);
+ } else {
+ val |= (0x0 << 0x0);
+ }
+
+ val &= ~(0x1 << 0x6);
+ if (use_rkey == false) {
+ val |= (0x0 << 0x6);
+ } else {
+ val |= (0x1 << 0x6);
+ }
+ asr_cipher_write(dd, CRYPTO_AES_CONFIG_REG, val);
+
+ return 0;
+}
+
+static int crypto_aes_set_key1(struct asr_bcm_cipher *dd, const uint8_t *key, AES_KEY_LEN_T keylen)
+{
+ uint32_t val;
+ int reg_index, key_end;
+
+ if (!key)
+ return 0;
+
+ switch (keylen) {
+ case AES_128:
+ key_end = 4;
+ break;
+ case AES_192:
+ key_end = 6;
+ break;
+ case AES_256:
+ key_end = 8;
+ break;
+ default:
+ key_end = 0;
+ dev_err(dd->dev, "Illegal aes keylen %d\n", keylen);
+ return -1;
+ }
+
+ for (reg_index = 0; reg_index < 8; reg_index++) {
+ if (reg_index < key_end) {
+ val = ((key[(reg_index << 2) +0] & 0xFF) << 0) | \
+ ((key[(reg_index << 2) + 1] & 0xFF) << 8) | \
+ ((key[(reg_index << 2) + 2] & 0xFF) << 16) | \
+ ((key[(reg_index << 2) + 3] & 0xFF) << 24);
+ } else {
+ val = 0;
+ }
+ asr_cipher_write(dd, CRYPTO_K1_W_REG(reg_index), val);
+ }
+
+ return 0;
+}
+
+static int crypto_aes_set_key2(struct asr_bcm_cipher *dd, const uint8_t *key, AES_KEY_LEN_T keylen)
+{
+ uint32_t val;
+ int reg_index, key_end;
+
+ if (!key)
+ return 0;
+
+ switch (keylen) {
+ case AES_128:
+ key_end = 4;
+ break;
+ case AES_192:
+ key_end = 6;
+ break;
+ case AES_256:
+ key_end = 8;
+ break;
+ default:
+ key_end = 0;
+ dev_err(dd->dev, "Illegal aes keylen %d\n", keylen);
+ return -1;
+ }
+
+ for (reg_index = 0; reg_index < 8; reg_index++) {
+ if (reg_index < key_end) {
+ val = ((key[(reg_index << 2) +0] & 0xFF) << 0) | \
+ ((key[(reg_index << 2) + 1] & 0xFF) << 8) | \
+ ((key[(reg_index << 2) + 2] & 0xFF) << 16) | \
+ ((key[(reg_index << 2) + 3] & 0xFF) << 24);
+ } else {
+ val = 0;
+ }
+ asr_cipher_write(dd, CRYPTO_K2_W_REG(reg_index), val);
+ }
+
+ return 0;
+}
+
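+/*
+ * Over-allocate and round the returned pointer up to 'align_bytes'; the offset
+ * back to the kmalloc() base is stashed in the 32-bit word just before the
+ * returned pointer so align_ptr_free() can recover it.
+ */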
+static void __maybe_unused *align_ptr_malloc(int size, int align_bytes)
+{
+ void *base_ptr = NULL;
+ void *mem_ptr = NULL;
+
+	base_ptr = kmalloc((size + align_bytes), GFP_KERNEL);
+	if (!base_ptr)
+		return NULL;
+	mem_ptr = (void *)((uint32_t)((uint32_t)base_ptr + align_bytes - 1) & ~(align_bytes - 1));
+ if (mem_ptr == base_ptr) {
+ mem_ptr = (void *)((uint32_t)base_ptr + align_bytes);
+ }
+ *((uint32_t *)mem_ptr - 1) = (uint32_t)mem_ptr - (uint32_t)base_ptr;
+ return mem_ptr;
+}
+
+static void __maybe_unused align_ptr_free(void *ptr)
+{
+ void *base_addr = NULL;
+ base_addr = (void *)((uint32_t)ptr - *((uint32_t *)ptr - 1));
+ kfree(base_addr);
+ return;
+}
+
+static void __maybe_unused free_dma_chain(DMA_DESC_T *header)
+{
+ DMA_DESC_T *p = header, *q = NULL;
+
+ while(p) {
+ if (p->next_desc) {
+ q = phys_to_virt(p->next_desc);
+ align_ptr_free(p);
+ p = q;
+ } else {
+ align_ptr_free(p);
+ break;
+ }
+ }
+
+ return;
+}
+
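+/*
+ * Build a chain of 16-byte aligned DMA descriptors covering [vaddr, vaddr + size).
+ * A physically contiguous buffer gets a single descriptor; otherwise the buffer
+ * is walked page by page, merging physically adjacent pages. Descriptor sizes
+ * are in 32-bit words, and each descriptor is flushed so the engine sees it.
+ */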
+static DMA_DESC_T __maybe_unused *alloc_dma_chain(uint32_t vaddr, uint32_t size)
+{
+ uint32_t paddr_s = virt_to_phys((void *)vaddr);
+ uint32_t paddr_e = virt_to_phys((void *)(vaddr + size));
+ DMA_DESC_T *header = NULL;
+ DMA_DESC_T *p = NULL, *q = NULL;
+ uint32_t vaddr_tmp = vaddr;
+
+ /* descriptor must be aligned to 16 bytes */
+ header = align_ptr_malloc(sizeof(DMA_DESC_T), 16);
+ if (header == NULL) {
+ return NULL;
+ }
+
+	/* handle contiguous physical memory area */
+ if (paddr_s + size == paddr_e) {
+ header->paddr = (uint32_t) paddr_s;
+ header->size = size >> 2;
+ header->next_desc = 0;
+ header->reserved = 0;
+ cipher_cache_operation((char *)header, sizeof(DMA_DESC_T));
+ return header;
+ }
+
+	/* handle non-contiguous physical memory area */
+ p = header;
+ header->paddr = (uint32_t) paddr_s;
+ header->size = ((uint32_t)(PAGE_SIZE - (paddr_s & (PAGE_SIZE - 1)))) >> 2;
+ header->next_desc = 0;
+ header->reserved = 0;
+
+ while (1) {
+ if ((p->paddr + (p->size << 2)) == virt_to_phys((void *)(vaddr_tmp + (p->size << 2))))
+ p->size += PAGE_SIZE >> 2;
+ else {
+ vaddr_tmp += (p->size << 2);
+ /* descriptor must be aligned to 16 bytes */
+ q = align_ptr_malloc(sizeof(DMA_DESC_T), 16);
+ if (q == NULL) {
+ free_dma_chain(header);
+ return NULL;
+ }
+ q->paddr = (uint32_t)virt_to_phys((void *)vaddr_tmp);
+ q->size = PAGE_SIZE >> 2;
+ q->next_desc = 0;
+ p->next_desc = (uint32_t)(virt_to_phys(q));
+ cipher_cache_operation((char *)p, sizeof(DMA_DESC_T));
+ p = q;
+ }
+ if (p->paddr + (p->size << 2) > paddr_e) {
+ p->size -= ((uint32_t)(PAGE_SIZE - (paddr_e & (PAGE_SIZE - 1)))) >> 2;
+ cipher_cache_operation((char *)p, sizeof(DMA_DESC_T));
+ break;
+ }
+ }
+
+ return header;
+}
+
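+/*
+ * Route the hardware key: bit 22 of CIU_SYSSEC_CTRL1 selects between the root
+ * key (RKEK, bit cleared) and the SSK (bit set) as the key fed to the engine.
+ */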
+static int rkek_cfg_init(struct asr_bcm_cipher *dd, int hwkey_select)
+{
+#define CIU_SYSSEC_CTRL1 (0x5C)
+
+ uint32_t value;
+ struct device_node *np;
+ struct resource res;
+ void __iomem *io_base;
+
+ /* set rkek or ssk */
+ np = of_find_compatible_node(NULL, NULL, "marvell,mmp-ciu");
+ if (!np) {
+		dev_err(dd->dev, "can't find ciu node to set the hw key select\n");
+ return -1;
+ }
+
+ if (of_address_to_resource(np, 0, &res)) {
+ return -1;
+ }
+
+	io_base = ioremap(res.start, resource_size(&res));
+	if (!io_base) {
+		dev_err(dd->dev, "ciu regs can't remap\n");
+		return -1;
+	}
+
+	value = readl_relaxed(io_base + CIU_SYSSEC_CTRL1);
+	if (hwkey_select == RK_KEY) {
+		value &= ~(1 << 22);
+	} else if (hwkey_select == SSK_KEY) {
+		value |= (1 << 22);
+	} else {
+		iounmap(io_base);
+		return -1;
+	}
+	writel_relaxed(value, io_base + CIU_SYSSEC_CTRL1);
+
+ iounmap(io_base);
+ return 0;
+}
+
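+/*
+ * Run 'blocks' AES blocks through the engine: build input/output DMA chains,
+ * reset and configure DMA + crypto, load key/IV, kick the transfer and wait
+ * for DMA and engine completion. The IV registers are read back for chained
+ * calls, and the last output block is polled until it differs from the input
+ * to make sure the DMA data actually landed in DDR.
+ */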
+static int aes_nblocks(struct asr_bcm_cipher *dd, AES_OP_MODE_T op_mode,
+		const uint8_t *in, uint8_t *out, unsigned long blocks,
+ const symmetric_key *skey1, const symmetric_key *skey2, AES_MODE_T mode, uint8_t *iv)
+{
+ int ret = 0;
+ int key_real_length;
+ int hwkey_select;
+	uint32_t pos;
+	unsigned long time_start;
+ uint8_t tmp[16];
+ DMA_DESC_T *in_list, *out_list;
+ uint8_t *key_data;
+ struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_cipher);
+
+ /* save last block of in for encryption result check */
+ pos = (blocks - 1) * 16;
+ memcpy(tmp, in + pos, 16);
+ memcpy(out + pos, in + pos, 16);
+
+ in_list = alloc_dma_chain((uint32_t)in, blocks << 4);
+ if (!in_list)
+ return -1;
+
+ out_list = alloc_dma_chain((uint32_t)out, blocks << 4);
+ if (!out_list) {
+ free_dma_chain(in_list);
+ return -1;
+ }
+
+ adec_engine_hw_reset(dev_dd, ACC_ENG_DMA);
+ adec_engine_hw_reset(dev_dd, ACC_ENG_CRYPTO);
+ abus_set_mode(dev_dd, ABUS_GRP_A_HASH, ABUS_GRP_B_AES, ABUS_STRAIGHT, ABUS_STRAIGHT);
+ crypto_aes_sw_reset(dd);
+
+ /* HW requires abs(rid - wid) > 2 */
+ dma_input_config(dev_dd, 0, 0);
+ dma_output_config(dev_dd, 0, 4);
+ ret = dma_input_address(dev_dd, (uint32_t)virt_to_phys((void *)in_list), 0, true);
+ if (ret != 0) {
+ dev_err(dd->dev, "dma_input_address error.");
+ goto exit;
+ }
+
+ ret = dma_output_address(dev_dd, (uint32_t)virt_to_phys((void *)out_list), 0, true);
+ if (ret != 0) {
+		dev_err(dd->dev, "dma_output_address error.");
+ goto exit;
+ }
+
+ /* Process key1 */
+ if (skey1 == NULL) {
+ goto exit;
+ }
+ key_real_length = skey1->rijndael.Nr & ~(0x3);
+ hwkey_select = skey1->rijndael.Nr & 0x3;
+
+ if (op_mode == AES_ENCRYPT_OP) {
+ key_data = (uint8_t *)skey1->rijndael.eK;
+ } else if (op_mode == AES_DECRYPT_OP) {
+ key_data = (uint8_t *)skey1->rijndael.dK;
+ } else {
+ goto exit;
+ }
+
+ switch (hwkey_select) {
+	case EXT_KEY: /* use provided key */
+ ret = crypto_aes_set_mode(dd, mode, op_mode, key_real_length / BYTES_TO_BITS, false);
+ if (ret) {
+ goto exit;
+ }
+ ret = crypto_aes_set_key1(dd, key_data, key_real_length / BYTES_TO_BITS);
+ if (ret) {
+ goto exit;
+ }
+ break;
+ case RK_KEY: /* use root key */
+ ret = crypto_aes_set_mode(dd, mode, op_mode, key_real_length / BYTES_TO_BITS, true);
+ if (ret) {
+ goto exit;
+ }
+ ret = rkek_cfg_init(dd, RK_KEY);
+ if (ret) {
+ goto exit;
+ }
+ break;
+ case SSK_KEY: /* use ssk key */
+ ret = crypto_aes_set_mode(dd, mode, op_mode, key_real_length / BYTES_TO_BITS, true);
+ if (ret) {
+ goto exit;
+ }
+ ret = rkek_cfg_init(dd, SSK_KEY);
+ if (ret) {
+ goto exit;
+ }
+ break;
+	default:
+		ret = -1;
+		goto exit;
+ }
+
+ /* Process IV and XTS key2 here */
+ switch(mode) {
+ case AES_XTS_ALG:
+ if (skey2 == NULL) {
+ goto exit;
+ }
+ key_real_length = skey2->rijndael.Nr & ~(0x3);
+ ret = crypto_aes_set_key2(dd, (uint8_t *)skey2->rijndael.eK, key_real_length / BYTES_TO_BITS);
+ if (ret) {
+ goto exit;
+ }
+ break;
+ case AES_CBC_ALG:
+ case AES_CTR_ALG:
+ ret = crypto_aes_set_iv(dd, iv);
+ if (ret != 0) {
+ goto exit;
+ }
+ break;
+ case AES_ECB_ALG:
+ break;
+ default:
+ goto exit;
+ }
+
+ asr_cipher_write(dd, CRYPTO_AES_STREAM_SIZE_REG, blocks << 4);
+ cipher_cache_operation((char *)in, blocks << 4);
+ cipher_cache_operation((char *)out, blocks << 4);
+
+ dma_output_start(dev_dd);
+ udelay(1);
+ crypto_aes_start(dd);
+ udelay(1);
+ dma_input_start(dev_dd);
+
+ ret = dma_wait_output_finish(dev_dd);
+ if (ret)
+ goto exit;
+ ret = crypto_aes_wait(dd);
+ if (ret)
+ goto exit;
+ ret = dma_wait_input_finish(dev_dd);
+ if (ret)
+ goto exit;
+
+ /* Process IV */
+ switch(mode) {
+ case AES_XTS_ALG:
+ case AES_CBC_ALG:
+ case AES_CTR_ALG:
+ ret = crypto_aes_get_iv(dd, iv);
+ if (ret != 0) {
+ goto exit;
+ }
+ break;
+ case AES_ECB_ALG:
+ break;
+ default:
+ goto exit;
+ }
+
+	time_start = jiffies;
+	/* make sure DMA data reached DDR by checking that the last block of out has changed */
+	while (!memcmp(out + pos, tmp, 16)) {
+		cipher_cache_operation(out + pos, 16);
+
+		if (time_after(jiffies, time_start + 500)) {
+			dev_err(dd->dev, "Encryption result check timed out: last output block unchanged\n");
+			break;
+		}
+	}
+
+exit:
+ free_dma_chain(in_list);
+ free_dma_chain(out_list);
+ return ret;
+}
+
+/* ciphers */
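+/*
+ * Key-length arguments carry the key source in their low two bits (external,
+ * root key or SSK); the remaining bits hold the actual key length.
+ */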
+static int se_rijndael_setup_internal(const uint8_t *key, int keylen, symmetric_key *skey)
+{
+ int key_real_length;
+ int hwkey_select;
+
+ if (!skey || keylen <= 0) {
+ return -1;
+ }
+
+ key_real_length = keylen & ~(0x3);
+ hwkey_select = keylen & 0x3;
+ switch (hwkey_select) {
+	case EXT_KEY: /* use provided key */
+ if ((!key) || (key_real_length > (int)(BYTES_TO_BITS * sizeof(skey->rijndael.eK)))
+ || (key_real_length > (int)(BYTES_TO_BITS * sizeof(skey->rijndael.dK)))) {
+ return -1;
+ }
+ memcpy(skey->rijndael.eK, key, key_real_length / BYTES_TO_BITS);
+ memcpy(skey->rijndael.dK, key, key_real_length / BYTES_TO_BITS);
+ break;
+ case RK_KEY: /* use huk */
+ case SSK_KEY: /* use ssk */
+ skey->rijndael.Nr = keylen;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int se_rijndael_setup(const uint8_t *key, int keylen, symmetric_key *skey)
+{
+ return se_rijndael_setup_internal(key, (((keylen & ~0x3) * BYTES_TO_BITS) | (keylen & 0x3)), skey);
+}
+
+static int se_rijndael_ecb_decrypt(struct asr_bcm_cipher *dd, const uint8_t *ct, uint8_t *pt,
+ const symmetric_key *skey)
+{
+ return aes_nblocks(dd, AES_DECRYPT_OP, ct, pt, 1, skey, NULL, AES_ECB_ALG, NULL);
+}
+
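+/*
+ * Handle buffers the DMA engine cannot take directly: word-unaligned in/out
+ * pointers or lengths that are not a multiple of 16 are bounced through
+ * zero-padded work buffers and processed in WORK_BUF_SIZE chunks. For XTS the
+ * chained IV is run through an ECB decrypt with key2 between chunks to carry
+ * the tweak forward.
+ */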
+static int _aes_handle_noalign(struct asr_bcm_cipher *dd, AES_OP_MODE_T op_mode,
+		const uint8_t *in, uint8_t *out, uint32_t length,
+ const symmetric_key *skey1, const symmetric_key *skey2,
+ AES_MODE_T mode, uint8_t *iv)
+{
+ int ret = 0;
+ uint32_t len_bytes = (length + 0xf) & (~0xf);
+ uint8_t *in_cpy = NULL, *out_cpy = NULL;
+ uint8_t *in_work = NULL, *out_work = NULL;
+ uint8_t *aligned_buf_in = NULL, *aligned_buf_out = NULL;
+ int size;
+
+ if (((uint32_t)out & 0x3) || ((uint32_t)in & 0x3) || (len_bytes > length)) {
+ in_cpy = (uint8_t *)in;
+ out_cpy = (uint8_t *)out;
+
+ /* if length is not a multiple of 16, zero padding */
+ if (((uint32_t)in & 0x3) || (len_bytes > length)) {
+ aligned_buf_in = kmalloc(min((int)len_bytes, WORK_BUF_SIZE), GFP_KERNEL);
+ if (!aligned_buf_in)
+ return -1;
+ memset(aligned_buf_in, 0, min((int)len_bytes, WORK_BUF_SIZE));
+ }
+
+		if (((uint32_t)out & 0x3) || (len_bytes > length)) {
+			aligned_buf_out = kmalloc(min((int)len_bytes, WORK_BUF_SIZE), GFP_KERNEL);
+			if (!aligned_buf_out) {
+				kfree(aligned_buf_in);
+				return -1;
+			}
+		}
+
+ while (len_bytes) {
+ size = min((int)len_bytes, WORK_BUF_SIZE);
+
+ if ((uint32_t)in & 0x3) {
+ memcpy(aligned_buf_in, in_cpy, size);
+ in_work = aligned_buf_in;
+ } else {
+ in_work = in_cpy;
+ }
+
+ if ((uint32_t)out & 0x3) {
+ memset(aligned_buf_out, 0x0, size);
+ out_work = aligned_buf_out;
+ } else {
+ out_work = out_cpy;
+ }
+
+ ret = aes_nblocks(dd, op_mode, in_work, out_work, size >> 4, skey1, skey2, mode, iv);
+ if (ret)
+ goto exit;
+
+ if ((uint32_t) out & 0x3)
+ memcpy(out_cpy, aligned_buf_out, size);
+
+ if (mode == AES_XTS_ALG && len_bytes != 0 && (len_bytes > WORK_BUF_SIZE)) {
+ symmetric_key *skey_local = kmalloc(sizeof(symmetric_key), GFP_KERNEL);
+ if (!skey_local) {
+ ret = -1;
+ goto exit;
+ }
+
+ ret = se_rijndael_setup((uint8_t *)skey2->rijndael.eK,
+ (skey2->rijndael.Nr/BYTES_TO_BITS), skey_local);
+ if (ret) {
+ kfree(skey_local);
+ goto exit;
+ }
+
+ ret = se_rijndael_ecb_decrypt(dd, iv, iv, skey_local);
+ if (ret) {
+ kfree(skey_local);
+ goto exit;
+ }
+
+ kfree(skey_local);
+ }
+
+ out_cpy += size;
+ in_cpy += size;
+ len_bytes -= size;
+ }
+exit:
+ if (aligned_buf_in)
+ kfree(aligned_buf_in);
+ if (aligned_buf_out)
+ kfree(aligned_buf_out);
+ } else {
+ ret = aes_nblocks(dd, op_mode, in, out, len_bytes >> 4, skey1, skey2, mode, iv);
+ }
+
+ return ret;
+}
+
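+/*
+ * Wrap the raw key material in symmetric_key descriptors (tagging the key
+ * source in the low bits of Nr) and hand off to _aes_handle_noalign().
+ */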
+static int aes_handle_noalign(struct asr_bcm_cipher *dd, AES_MODE_T mode, AES_OP_MODE_T op_mode, AES_KEY_SELECT_T key_select,
+ const uint8_t *key1, uint32_t keylen1, const uint8_t *key2, uint32_t keylen2,
+ const uint8_t *in, uint8_t *out, uint32_t size, uint8_t *iv)
+{
+ int ret;
+ symmetric_key *pskey1, *pskey2;
+
+ pskey1 = kmalloc(sizeof(symmetric_key), GFP_KERNEL);
+ if (!pskey1) {
+ return -1;
+ }
+
+ pskey2 = kmalloc(sizeof(symmetric_key), GFP_KERNEL);
+ if (!pskey2) {
+ kfree(pskey1);
+ return -1;
+ }
+
+	memset(pskey1, 0, sizeof(symmetric_key));
+	memset(pskey2, 0, sizeof(symmetric_key));
+
+ if (op_mode == AES_ENCRYPT_OP) {
+ pskey1->rijndael.eK = (uint32_t *)key1;
+ } else if (op_mode == AES_DECRYPT_OP) {
+ pskey1->rijndael.dK = (uint32_t *)key1;
+ }
+
+ if (key_select == EXT_KEY) {
+ pskey1->rijndael.Nr = (keylen1 * BYTES_TO_BITS) & (~0x3);
+ } else if (key_select == RK_KEY) {
+ pskey1->rijndael.Nr = keylen1 * BYTES_TO_BITS | 0x1;
+ } else if (key_select == SSK_KEY) {
+ pskey1->rijndael.Nr = keylen1 * BYTES_TO_BITS | 0x2;
+	} else {
+		kfree(pskey1);
+		kfree(pskey2);
+		return -1;
+	}
+
+ if (mode == AES_XTS_ALG) {
+ if (op_mode == AES_ENCRYPT_OP) {
+ pskey2->rijndael.eK = (uint32_t *)key2;
+ pskey2->rijndael.Nr = keylen2 * BYTES_TO_BITS;
+ } else if (op_mode == AES_DECRYPT_OP) {
+ pskey2->rijndael.dK = (uint32_t *)key2;
+ pskey2->rijndael.Nr = keylen2 * BYTES_TO_BITS;
+ }
+ ret = _aes_handle_noalign(dd, op_mode, in, out, size, pskey1, pskey2, mode, iv);
+ } else {
+ ret = _aes_handle_noalign(dd, op_mode, in, out, size, pskey1, NULL, mode, iv);
+ }
+
+ kfree(pskey1);
+ kfree(pskey2);
+ return ret;
+}
+
+/* crypto framework */
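+/*
+ * Glue between the Linux skcipher API and the engine paths above: requests are
+ * queued per device, copied into a contiguous bounce buffer, processed, and the
+ * result copied back before completion.
+ */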
+static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_bcm_cipher *dd)
+{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+
+ if (req->cryptlen < ivsize)
+ return;
+
+ if (rctx->mode & FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->cryptlen - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst)
+ memcpy(req->iv, rctx->lastc, ivsize);
+ else
+ scatterwalk_map_and_copy(req->iv, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+}
+
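+/*
+ * Finish the current request: clear FLAGS_BUSY, update the request IV from the
+ * last ciphertext block, complete the async request, release the device and
+ * kick the queue tasklet so the next entry can run.
+ */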
+static int asr_cipher_complete(struct asr_bcm_cipher *dd, int err)
+{
+ struct asr_bcm_dev *bcm_dd = dev_get_drvdata(dd->dev);
+ struct asr_bcm_ops *bcm_ops = bcm_dd->bcm_ops;
+
+ dd->flags &= ~FLAGS_BUSY;
+
+ asr_cipher_set_iv_as_last_ciphertext_block(dd);
+
+ if (dd->is_async)
+ dd->areq->complete(dd->areq, err);
+
+ bcm_ops->dev_put(bcm_dd);
+
+ tasklet_schedule(&dd->queue_task);
+
+ return err;
+}
+
+static int asr_complete(struct asr_bcm_cipher *dd)
+{
+ return asr_cipher_complete(dd, 0);
+}
+
+static inline size_t asr_cipher_padlen(size_t len, size_t block_size)
+{
+ len &= block_size - 1;
+ return len ? block_size - len : 0;
+}
+
+static int asr_cipher_buff_init(struct asr_bcm_cipher *dd, uint32_t len)
+{
+ dd->buf = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
+
+ if (!dd->buf) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ return -ENOMEM;
+ }
+
+ dd->buflen = PAGE_SIZE << get_order(len);
+
+ return 0;
+}
+
+static void asr_cipher_buff_cleanup(struct asr_bcm_cipher *dd, uint32_t len)
+{
+ free_pages((unsigned long)dd->buf, get_order(len));
+ dd->buflen = 0;
+}
+
+static inline void asr_cipher_get(struct asr_bcm_cipher *dd)
+{
+ mutex_lock(&dd->cipher_lock);
+}
+
+static inline void asr_cipher_put(struct asr_bcm_cipher *dd)
+{
+	if (mutex_is_locked(&dd->cipher_lock))
+ mutex_unlock(&dd->cipher_lock);
+}
+
+static int asr_sca_cipher_process(struct asr_bcm_cipher *dd,
+ struct skcipher_request *req, asr_cipher_fn_t resume)
+{
+ int ret;
+ size_t padlen = asr_cipher_padlen(req->cryptlen, CIPHER_BLOCK_SIZE);
+ struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ AES_MODE_T mode;
+ AES_OP_MODE_T op_mode;
+ AES_KEY_SELECT_T key_select;
+
+ asr_cipher_get(dd);
+
+	if (unlikely(req->cryptlen == 0)) {
+		asr_cipher_put(dd);
+		return asr_cipher_complete(dd, -EINVAL);
+	}
+
+ dd->datalen = req->cryptlen + padlen;
+ ret = asr_cipher_buff_init(dd, dd->datalen);
+	if (ret) {
+		asr_cipher_put(dd);
+		return asr_cipher_complete(dd, ret);
+	}
+
+ sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->cryptlen);
+
+ dd->total = req->cryptlen;
+ dd->real_dst = req->dst;
+ dd->resume = resume;
+ dd->data = (u32 *)dd->buf;
+
+ if ((dd->flags & FLAGS_ENCRYPT))
+ op_mode = AES_ENCRYPT_OP;
+ else
+ op_mode = AES_DECRYPT_OP;
+
+	if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_ECB) {
+		mode = AES_ECB_ALG;
+	} else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CBC) {
+		mode = AES_CBC_ALG;
+	} else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CTR) {
+		mode = AES_CTR_ALG;
+	} else {
+		asr_cipher_buff_cleanup(dd, dd->datalen);
+		asr_cipher_put(dd);
+		return asr_cipher_complete(dd, -EINVAL);
+	}
+
+ if (rctx->use_rkek) {
+ key_select = RK_KEY;
+ } else {
+ key_select = EXT_KEY;
+ }
+
+ ret = aes_handle_noalign(dd, mode, op_mode, key_select, (uint8_t *)dd->ctx->key,
+ dd->ctx->keylen, NULL, 0, (const uint8_t *)dd->data, (uint8_t *)dd->data,
+ dd->datalen, req->iv);
+ if (ret)
+ ret = -EINVAL;
+
+ if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
+ dd->buf, dd->total))
+ ret = -EINVAL;
+
+ asr_cipher_buff_cleanup(dd, dd->datalen);
+ asr_cipher_put(dd);
+
+ return asr_cipher_complete(dd, ret);
+}
+
+static inline void asr_cipher_set_mode(struct asr_bcm_cipher *dd,
+ const struct asr_cipher_reqctx *rctx)
+{
+ /* Clear all but persistent flags and set request flags. */
+ dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
+}
+
+static int asr_cipher_start(struct asr_bcm_cipher *dd)
+{
+ struct skcipher_request *req = skcipher_request_cast(dd->areq);
+ struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct asr_bcm_dev *bcm_dd = dev_get_drvdata(dd->dev);
+ struct asr_bcm_ops *bcm_ops = bcm_dd->bcm_ops;
+
+ bcm_ops->dev_get(bcm_dd);
+
+ asr_cipher_set_mode(dd, rctx);
+ return asr_sca_cipher_process(dd, req, asr_complete);
+}
+
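+/*
+ * Enqueue a new request and, if the engine is idle, dequeue the next one,
+ * notify any backlogged request and start it via the context's start()
+ * callback. Returns the enqueue status of new_areq unless new_areq itself was
+ * dequeued and run synchronously, in which case the start() result is returned.
+ */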
+static int asr_cipher_handle_queue(struct asr_bcm_cipher *dd,
+ struct crypto_async_request *new_areq)
+{
+ struct crypto_async_request *areq, *backlog;
+ struct asr_cipher_ctx *ctx;
+ unsigned long flags;
+ bool start_async;
+ int err, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (new_areq)
+ ret = crypto_enqueue_request(&dd->queue, new_areq);
+ if (dd->flags & FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->queue);
+ areq = crypto_dequeue_request(&dd->queue);
+ if (areq) {
+ dd->flags |= FLAGS_BUSY;
+ }
+ spin_unlock_irqrestore(&dd->lock, flags);
+ if (!areq)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ ctx = crypto_tfm_ctx(areq->tfm);
+ dd->areq = areq;
+ dd->ctx = ctx;
+ start_async = (areq != new_areq);
+ dd->is_async = start_async;
+
+ /* WARNING: ctx->start() MAY change dd->is_async. */
+ err = ctx->start(dd);
+ return (start_async) ? ret : err;
+}
+
+static int asr_cipher(struct skcipher_request *req, unsigned long mode)
+{
+ int ret;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct asr_cipher_reqctx *rctx;
+
+ ctx->block_size = CIPHER_BLOCK_SIZE;
+ rctx = skcipher_request_ctx(req);
+ rctx->mode = mode;
+ rctx->use_rkek = ctx->use_rkek;
+
+ if (!(mode & FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ unsigned int ivsize = crypto_skcipher_ivsize(cipher);
+ if (req->cryptlen >= ivsize) {
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ req->cryptlen - ivsize,
+ ivsize, 0);
+ }
+ }
+
+	ret = asr_cipher_handle_queue(ctx->dd, &req->base);
+
+	return ret;
+}
+
+static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct asr_bcm_cipher *dd = asr_cipher_local;
+
+ ctx->dd = dd;
+ ctx->use_rkek = false;
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct asr_bcm_cipher *dd = asr_cipher_local;
+
+ ctx->dd = dd;
+ if (!dd->rkek_burned)
+ return -EPERM;
+
+ ctx->use_rkek = true;
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
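+/*
+ * Inspect the life-cycle state bits in GEU KSTR bank 6; more than one bit set
+ * is treated as "RKEK burned", which enables the aes-hwkey algorithms.
+ */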
+static int asr_cipher_rkek_fused(struct asr_bcm_cipher *dd)
+{
+#define GEU_KSTR_BANK6_LCS (0x0168)
+#define GEU_KSTR_LCS_DM_BASE (3)
+#define GEU_KSTR_LCS_MASK (0x7)
+
+ uint32_t value;
+ struct device_node *np;
+ struct resource res;
+ void __iomem *io_base;
+
+ /* get geu node */
+ np = of_find_compatible_node(NULL, NULL, "asr,asr-geu");
+ if (!np) {
+ dev_err(dd->dev, "can't find geu node to check rkek burned");
+ return 0;
+ }
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(dd->dev, "can't find geu address");
+ return 0;
+ }
+
+	io_base = ioremap(res.start, resource_size(&res));
+ if (!io_base) {
+ dev_err(dd->dev, "geu regs can't remap");
+ return 0;
+ }
+
+ value = readl_relaxed(io_base + GEU_KSTR_BANK6_LCS);
+ value >>= GEU_KSTR_LCS_DM_BASE;
+ value &= GEU_KSTR_LCS_MASK;
+ if (hweight32(value) > 1) {
+ iounmap(io_base);
+ return 1;
+ }
+
+ iounmap(io_base);
+ return 0;
+}
+
+static int asr_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return asr_cipher(req, FLAGS_AES | FLAGS_ECB | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return asr_cipher(req, FLAGS_AES | FLAGS_ECB);
+}
+
+static int asr_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return asr_cipher(req, FLAGS_AES | FLAGS_CBC | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return asr_cipher(req, FLAGS_AES | FLAGS_CBC);
+}
+
+static int asr_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return asr_cipher(req, FLAGS_AES | FLAGS_CTR | FLAGS_ENCRYPT);
+}
+
+static int asr_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return asr_cipher(req, FLAGS_AES | FLAGS_CTR);
+}
+
+static int asr_cipher_init(struct crypto_skcipher *tfm)
+{
+ struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+ ctx->start = asr_cipher_start;
+
+ return 0;
+}
+
+static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
+{
+ struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct asr_bcm_cipher *dd = asr_cipher_local;
+
+ if (!dd->rkek_burned)
+ return -EPERM;
+
+ tfm->reqsize = sizeof(struct asr_cipher_reqctx);
+ ctx->start = asr_cipher_start;
+
+ return 0;
+}
+
+static void asr_cipher_exit(struct crypto_skcipher *tfm)
+{
+ struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+}
+
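+/*
+ * Registered algorithms: standard ecb/cbc/ctr(aes) plus ecb/cbc "aes-hwkey"
+ * variants that run off the fused root key. Users reach them through the usual
+ * kernel skcipher API, e.g. crypto_alloc_skcipher("cbc(aes)", 0, 0).
+ */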
+static struct skcipher_alg cipher_algs[] = {
+ /* AES - ECB */
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "asr-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_cipher_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = CIPHER_MIN_KEY_SIZE,
+ .max_keysize = CIPHER_MAX_KEY_SIZE,
+ .setkey = asr_cipher_setkey,
+ .encrypt = asr_aes_ecb_encrypt,
+ .decrypt = asr_aes_ecb_decrypt,
+ .init = asr_cipher_init,
+ .exit = asr_cipher_exit,
+ },
+ /* AES - CBC */
+ {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "asr-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_cipher_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = CIPHER_MIN_KEY_SIZE,
+ .max_keysize = CIPHER_MAX_KEY_SIZE,
+ .setkey = asr_cipher_setkey,
+ .encrypt = asr_aes_cbc_encrypt,
+ .decrypt = asr_aes_cbc_decrypt,
+ .init = asr_cipher_init,
+ .exit = asr_cipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ /* AES - CTR */
+ {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "asr-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_cipher_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = CIPHER_MIN_KEY_SIZE,
+ .max_keysize = CIPHER_MAX_KEY_SIZE,
+ .setkey = asr_cipher_setkey,
+ .encrypt = asr_aes_ctr_encrypt,
+ .decrypt = asr_aes_ctr_decrypt,
+ .init = asr_cipher_init,
+ .exit = asr_cipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+
+ /* hardware key AES - ECB */
+ {
+ .base = {
+ .cra_name = "ecb(aes-hwkey)",
+			.cra_driver_name = "asr-ecb-aes-hwkey",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_cipher_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = CIPHER_MIN_KEY_SIZE,
+ .max_keysize = CIPHER_MAX_KEY_SIZE,
+ .setkey = asr_cipher_set_hwkey,
+ .encrypt = asr_aes_ecb_encrypt,
+ .decrypt = asr_aes_ecb_decrypt,
+ .init = asr_cipher_hwkey_init,
+ .exit = asr_cipher_exit,
+ },
+	/* hardware key AES - CBC */
+ {
+ .base = {
+ .cra_name = "cbc(aes-hwkey)",
+			.cra_driver_name = "asr-cbc-aes-hwkey",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct asr_cipher_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = CIPHER_MIN_KEY_SIZE,
+ .max_keysize = CIPHER_MAX_KEY_SIZE,
+ .setkey = asr_cipher_set_hwkey,
+ .encrypt = asr_aes_cbc_encrypt,
+ .decrypt = asr_aes_cbc_decrypt,
+ .init = asr_cipher_hwkey_init,
+ .exit = asr_cipher_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+};
+
+static void asr_cipher_queue_task(unsigned long data)
+{
+ struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;
+
+ asr_cipher_handle_queue(dd, NULL);
+}
+
+static void asr_cipher_done_task(unsigned long data)
+{
+ struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;
+
+ dd->is_async = true;
+ (void)dd->resume(dd);
+}
+
+int asr_bcm_cipher_register(struct asr_bcm_dev *bcm_dd)
+{
+ int err, i, j;
+ struct device_node *np = NULL;
+ struct asr_bcm_cipher *cipher_dd;
+
+ cipher_dd = &bcm_dd->asr_cipher;
+ cipher_dd->dev = bcm_dd->dev;
+ cipher_dd->io_base = bcm_dd->io_base;
+ cipher_dd->phys_base = bcm_dd->phys_base;
+
+ np = cipher_dd->dev->of_node;
+
+ cipher_dd->rkek_burned = asr_cipher_rkek_fused(cipher_dd);
+
+ asr_cipher_local = cipher_dd;
+
+ spin_lock_init(&cipher_dd->lock);
+ mutex_init(&cipher_dd->cipher_lock);
+ tasklet_init(&cipher_dd->done_task, asr_cipher_done_task,
+ (unsigned long)cipher_dd);
+ tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
+ (unsigned long)cipher_dd);
+ crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);
+
+ for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
+ err = crypto_register_skcipher(&cipher_algs[i]);
+		if (err) {
+ for (j = 0; j < i; j++)
+ crypto_unregister_skcipher(&cipher_algs[j]);
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_cipher_register);
+
+int asr_bcm_cipher_unregister(struct asr_bcm_dev *bcm_dd)
+{
+ int i;
+ struct asr_bcm_cipher *cipher_dd = &bcm_dd->asr_cipher;
+
+ for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
+ crypto_unregister_skcipher(&cipher_algs[i]);
+
+ tasklet_kill(&cipher_dd->done_task);
+ tasklet_kill(&cipher_dd->queue_task);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asr_bcm_cipher_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
+MODULE_DESCRIPTION("ASR bcm cipher driver");
\ No newline at end of file