#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cputype.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <linux/jiffies.h>
#include <crypto/aes.h>
#include <crypto/sm4.h>
#include <crypto/internal/skcipher.h>
#include "asr-bcm.h"
#include "asr-cipher.h"

#define CIPHER_BLOCK_SIZE   AES_BLOCK_SIZE
#define CIPHER_MIN_KEY_SIZE AES_MIN_KEY_SIZE
#define CIPHER_MAX_KEY_SIZE AES_MAX_KEY_SIZE

static struct asr_bcm_cipher *asr_cipher_local = NULL;

static inline u32 asr_cipher_read(struct asr_bcm_cipher *dd, u32 offset)
{
    u32 value = readl_relaxed(dd->io_base + offset);

    return value;
}

static inline void asr_cipher_write(struct asr_bcm_cipher *dd,
        u32 offset, u32 value)
{
    writel_relaxed(value, dd->io_base + offset);
}

static inline void cipher_cache_operation(void *addr, int size)
{
    __cpuc_flush_dcache_area(addr, size);
}

/* hardware handle */
static void crypto_aes_sw_reset(struct asr_bcm_cipher *dd)
{
    uint32_t val;

    val = 0x1;
    asr_cipher_write(dd, CRYPTO_AES_CONTROL_REG, val);
    val = 0x0;
    asr_cipher_write(dd, CRYPTO_AES_CONTROL_REG, val);

    return;
}

static void crypto_aes_start(struct asr_bcm_cipher *dd)
{
    uint32_t val;

    val = 0x1;
    asr_cipher_write(dd, CRYPTO_AES_COMMAND_REG, val);

    return;
}

static int crypto_aes_wait(struct asr_bcm_cipher *dd)
{
    uint32_t val;

    val = asr_cipher_read(dd, CRYPTO_AES_INTRPT_SRC_REG);
    asr_cipher_write(dd, CRYPTO_AES_INTRPT_SRC_REG, val);

    return 0;
}

static int crypto_engine_select(struct asr_bcm_cipher *dd, CRYPTO_ENG_SEL_T engine)
{
    uint32_t val;

    val = asr_cipher_read(dd, CRYPTO_ENGINE_SEL_REG);
    val &= ~0x3;

    switch (engine) {
    case ENG_AES:
        val |= 0x1;
        break;
    case ENG_DES:
        val |= 0x2;
        break;
    case ENG_RC4:
        val |= 0x3;
        break;
    default:
        dev_err(dd->dev, "Illegal engine %d\n", engine);
        return -1;
    }

    asr_cipher_write(dd, CRYPTO_ENGINE_SEL_REG, val);

    return 0;
}

static int crypto_aes_set_iv(struct asr_bcm_cipher *dd, const uint8_t *iv)
{
    uint32_t val;
    int reg_index;

    if (iv == NULL)
        return -1;

    for (reg_index = 0; reg_index < 4; reg_index++) {
        val = ((iv[(reg_index << 2) + 0] & 0xFF) << 0) |
              ((iv[(reg_index << 2) + 1] & 0xFF) << 8) |
              ((iv[(reg_index << 2) + 2] & 0xFF) << 16) |
              ((iv[(reg_index << 2) + 3] & 0xFF) << 24);
        asr_cipher_write(dd, CRYPTO_IV_REG(reg_index), val);
    }

    return 0;
}

static int crypto_aes_get_iv(struct asr_bcm_cipher *dd, uint8_t *iv)
{
    uint32_t val;
    int reg_index;

    if (iv == NULL)
        return -1;

    for (reg_index = 0; reg_index < 4; reg_index++) {
        val = asr_cipher_read(dd, CRYPTO_IV_REG(reg_index));
        iv[(reg_index << 2) + 0] = val & 0xFF;
        iv[(reg_index << 2) + 1] = (val >> 8) & 0xFF;
        iv[(reg_index << 2) + 2] = (val >> 16) & 0xFF;
        iv[(reg_index << 2) + 3] = (val >> 24) & 0xFF;
    }

    return 0;
}

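/*
 * Program CRYPTO_AES_CONFIG_REG for the requested operation. As implemented
 * below: bits [5:3] select the chaining mode (ECB/CBC/CTR/XTS/key wrap),
 * bits [2:1] the key length, bit 0 encrypt vs. decrypt, and bit 6 whether
 * the hardware root/SSK key is used instead of a software-provided key.
 */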
static int crypto_aes_set_mode(struct asr_bcm_cipher *dd,
        AES_MODE_T mode, AES_OP_MODE_T op_mode,
        AES_KEY_LEN_T keylen, bool use_rkey)
{
    uint32_t val;

    crypto_engine_select(dd, ENG_AES);
    val = asr_cipher_read(dd, CRYPTO_AES_CONFIG_REG);
    val &= ~(0x7 << 0x3);

    switch (mode) {
    case AES_ECB_ALG:
        val |= (0x0 << 0x3);
        break;
    case AES_CBC_ALG:
        val |= (0x1 << 0x3);
        break;
    case AES_CTR_ALG:
        val |= (0x2 << 0x3);
        break;
    case AES_XTS_ALG:
        val |= (0x3 << 0x3);
        break;
    case AES_KEYWRAP:
        val |= (0x4 << 0x3);
        break;
    default:
        dev_err(dd->dev, "Illegal aes mode %d\n", mode);
        return -1;
    }

    val &= ~(0x3 << 0x1);
    switch (keylen) {
    case AES_128:
        val |= (0x0 << 0x1);
        break;
    case AES_192:
        val |= (0x2 << 0x1);
        break;
    case AES_256:
        val |= (0x1 << 0x1);
        break;
    default:
        dev_err(dd->dev, "Illegal aes keylen %d\n", keylen);
        return -1;
    }

    val &= ~(0x1 << 0x0);
    if (op_mode == AES_DECRYPT_OP)
        val |= (0x1 << 0x0);

    val &= ~(0x1 << 0x6);
    if (use_rkey)
        val |= (0x1 << 0x6);

    asr_cipher_write(dd, CRYPTO_AES_CONFIG_REG, val);

    return 0;
}

static int crypto_aes_set_key1(struct asr_bcm_cipher *dd, const uint8_t *key, AES_KEY_LEN_T keylen)
{
    uint32_t val;
    int reg_index, key_end;

    if (!key)
        return 0;

    switch (keylen) {
    case AES_128:
        key_end = 4;
        break;
    case AES_192:
        key_end = 6;
        break;
    case AES_256:
        key_end = 8;
        break;
    default:
        key_end = 0;
        dev_err(dd->dev, "Illegal aes keylen %d\n", keylen);
        return -1;
    }

    for (reg_index = 0; reg_index < 8; reg_index++) {
        if (reg_index < key_end) {
            val = ((key[(reg_index << 2) + 0] & 0xFF) << 0) |
                  ((key[(reg_index << 2) + 1] & 0xFF) << 8) |
                  ((key[(reg_index << 2) + 2] & 0xFF) << 16) |
                  ((key[(reg_index << 2) + 3] & 0xFF) << 24);
        } else {
            val = 0;
        }
        asr_cipher_write(dd, CRYPTO_K1_W_REG(reg_index), val);
    }

    return 0;
}

static int crypto_aes_set_key2(struct asr_bcm_cipher *dd, const uint8_t *key, AES_KEY_LEN_T keylen)
{
    uint32_t val;
    int reg_index, key_end;

    if (!key)
        return 0;

    switch (keylen) {
    case AES_128:
        key_end = 4;
        break;
    case AES_192:
        key_end = 6;
        break;
    case AES_256:
        key_end = 8;
        break;
    default:
        key_end = 0;
        dev_err(dd->dev, "Illegal aes keylen %d\n", keylen);
        return -1;
    }

    for (reg_index = 0; reg_index < 8; reg_index++) {
        if (reg_index < key_end) {
            val = ((key[(reg_index << 2) + 0] & 0xFF) << 0) |
                  ((key[(reg_index << 2) + 1] & 0xFF) << 8) |
                  ((key[(reg_index << 2) + 2] & 0xFF) << 16) |
                  ((key[(reg_index << 2) + 3] & 0xFF) << 24);
        } else {
            val = 0;
        }
        asr_cipher_write(dd, CRYPTO_K2_W_REG(reg_index), val);
    }

    return 0;
}

static void __maybe_unused *align_ptr_malloc(int size, int align_bytes)
{
    void *base_ptr = NULL;
    void *mem_ptr = NULL;

    base_ptr = kmalloc((size + align_bytes), GFP_KERNEL);
    if (!base_ptr)
        return NULL;
    mem_ptr = (void *)((uint32_t)((uint32_t)base_ptr + align_bytes - 1) & ~(align_bytes - 1));
    if (mem_ptr == base_ptr) {
        mem_ptr = (void *)((uint32_t)base_ptr + align_bytes);
    }
    *((uint32_t *)mem_ptr - 1) = (uint32_t)mem_ptr - (uint32_t)base_ptr;
    return mem_ptr;
}

static void __maybe_unused align_ptr_free(void *ptr)
{
    void *base_addr = NULL;
    base_addr = (void *)((uint32_t)ptr - *((uint32_t *)ptr - 1));
    kfree(base_addr);
    return;
}

static void __maybe_unused free_dma_chain(DMA_DESC_T *header)
{
    DMA_DESC_T *p = header, *q = NULL;

    while (p) {
        if (p->next_desc) {
            q = phys_to_virt(p->next_desc);
            align_ptr_free(p);
            p = q;
        } else {
            align_ptr_free(p);
            break;
        }
    }

    return;
}

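/*
 * Build a DMA descriptor chain for [vaddr, vaddr + size). Physically
 * contiguous buffers get a single descriptor; otherwise the buffer is walked
 * page by page and a new 16-byte-aligned descriptor is appended whenever the
 * physical mapping stops being contiguous. Descriptor sizes are in 32-bit
 * words, and each descriptor is flushed to DRAM before the engine reads it.
 */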
static DMA_DESC_T __maybe_unused *alloc_dma_chain(uint32_t vaddr, uint32_t size)
{
    uint32_t paddr_s = virt_to_phys((void *)vaddr);
    uint32_t paddr_e = virt_to_phys((void *)(vaddr + size));
    DMA_DESC_T *header = NULL;
    DMA_DESC_T *p = NULL, *q = NULL;
    uint32_t vaddr_tmp = vaddr;

    /* descriptor must be aligned to 16 bytes */
    header = align_ptr_malloc(sizeof(DMA_DESC_T), 16);
    if (header == NULL) {
        return NULL;
    }

    /* handle contiguous physical memory area */
    if (paddr_s + size == paddr_e) {
        header->paddr = (uint32_t)paddr_s;
        header->size = size >> 2;
        header->next_desc = 0;
        header->reserved = 0;
        cipher_cache_operation((char *)header, sizeof(DMA_DESC_T));
        return header;
    }

    /* handle non-contiguous physical memory area */
    p = header;
    header->paddr = (uint32_t)paddr_s;
    header->size = ((uint32_t)(PAGE_SIZE - (paddr_s & (PAGE_SIZE - 1)))) >> 2;
    header->next_desc = 0;
    header->reserved = 0;

    while (1) {
        if ((p->paddr + (p->size << 2)) == virt_to_phys((void *)(vaddr_tmp + (p->size << 2)))) {
            p->size += PAGE_SIZE >> 2;
        } else {
            vaddr_tmp += (p->size << 2);
            /* descriptor must be aligned to 16 bytes */
            q = align_ptr_malloc(sizeof(DMA_DESC_T), 16);
            if (q == NULL) {
                free_dma_chain(header);
                return NULL;
            }
            q->paddr = (uint32_t)virt_to_phys((void *)vaddr_tmp);
            q->size = PAGE_SIZE >> 2;
            q->next_desc = 0;
            p->next_desc = (uint32_t)(virt_to_phys(q));
            cipher_cache_operation((char *)p, sizeof(DMA_DESC_T));
            p = q;
        }
        if (p->paddr + (p->size << 2) > paddr_e) {
            p->size -= ((uint32_t)(PAGE_SIZE - (paddr_e & (PAGE_SIZE - 1)))) >> 2;
            cipher_cache_operation((char *)p, sizeof(DMA_DESC_T));
            break;
        }
    }

    return header;
}

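/*
 * Route the hardware key to the AES engine by toggling bit 22 of
 * CIU_SYSSEC_CTRL1: cleared for the root key (RKEK), set for the SSK.
 */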
static int rkek_cfg_init(struct asr_bcm_cipher *dd, int hwkey_select)
{
#define CIU_SYSSEC_CTRL1 (0x5C)

    uint32_t value;
    struct device_node *np;
    struct resource res;
    void __iomem *io_base;

    /* set rkek or ssk */
    np = of_find_compatible_node(NULL, NULL, "marvell,mmp-ciu");
    if (!np) {
        dev_err(dd->dev, "can't find ciu node for set opt key sel");
        return -1;
    }

    if (of_address_to_resource(np, 0, &res)) {
        return -1;
    }

    io_base = ioremap(res.start, resource_size(&res));
    if (!io_base) {
        dev_err(dd->dev, "ciu regs can't remap");
        return -1;
    }

    value = readl_relaxed(io_base + CIU_SYSSEC_CTRL1);
    if (hwkey_select == RK_KEY) {
        value &= ~(1 << 22);
    } else if (hwkey_select == SSK_KEY) {
        value |= (1 << 22);
    } else {
        iounmap(io_base);
        return -1;
    }
    writel_relaxed(value, io_base + CIU_SYSSEC_CTRL1);

    iounmap(io_base);
    return 0;
}

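/*
 * Run one hardware AES operation over `blocks` 16-byte blocks. The flow is:
 * build input/output DMA descriptor chains, reset the DMA and crypto engines,
 * program mode/key/IV, kick off the output DMA, the AES engine and the input
 * DMA, wait for completion, read back the IV, and finally poll the last
 * output block until it differs from the saved input copy so the DMA result
 * is known to have landed in DDR before returning.
 */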
static int aes_nblocks(struct asr_bcm_cipher *dd, AES_OP_MODE_T op_mode,
        const uint8_t *in, uint8_t *out, unsigned long blocks,
        const symmetric_key *skey1, const symmetric_key *skey2, AES_MODE_T mode, uint8_t *iv)
{
    int ret = 0;
    int key_real_length;
    int hwkey_select;
    uint32_t pos, time_start;
    uint8_t tmp[16];
    DMA_DESC_T *in_list, *out_list;
    uint8_t *key_data;
    struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_cipher);

    /* save last block of in for encryption result check */
    pos = (blocks - 1) * 16;
    memcpy(tmp, in + pos, 16);
    memcpy(out + pos, in + pos, 16);

    in_list = alloc_dma_chain((uint32_t)in, blocks << 4);
    if (!in_list)
        return -1;

    out_list = alloc_dma_chain((uint32_t)out, blocks << 4);
    if (!out_list) {
        free_dma_chain(in_list);
        return -1;
    }

    adec_engine_hw_reset(dev_dd, ACC_ENG_DMA);
    adec_engine_hw_reset(dev_dd, ACC_ENG_CRYPTO);
    abus_set_mode(dev_dd, ABUS_GRP_A_HASH, ABUS_GRP_B_AES, ABUS_STRAIGHT, ABUS_STRAIGHT);
    crypto_aes_sw_reset(dd);

    /* HW requires abs(rid - wid) > 2 */
    dma_input_config(dev_dd, 0, 0);
    dma_output_config(dev_dd, 0, 4);
    ret = dma_input_address(dev_dd, (uint32_t)virt_to_phys((void *)in_list), 0, true);
    if (ret != 0) {
        dev_err(dd->dev, "dma_input_address error.");
        goto exit;
    }

    ret = dma_output_address(dev_dd, (uint32_t)virt_to_phys((void *)out_list), 0, true);
    if (ret != 0) {
        dev_err(dd->dev, "dma_output_address error.");
        goto exit;
    }

    /* Process key1 */
    if (skey1 == NULL) {
        ret = -1;
        goto exit;
    }
    key_real_length = skey1->rijndael.Nr & ~(0x3);
    hwkey_select = skey1->rijndael.Nr & 0x3;

    if (op_mode == AES_ENCRYPT_OP) {
        key_data = (uint8_t *)skey1->rijndael.eK;
    } else if (op_mode == AES_DECRYPT_OP) {
        key_data = (uint8_t *)skey1->rijndael.dK;
    } else {
        ret = -1;
        goto exit;
    }

    switch (hwkey_select) {
    case EXT_KEY: /* use provided key */
        ret = crypto_aes_set_mode(dd, mode, op_mode, key_real_length / BYTES_TO_BITS, false);
        if (ret) {
            goto exit;
        }
        ret = crypto_aes_set_key1(dd, key_data, key_real_length / BYTES_TO_BITS);
        if (ret) {
            goto exit;
        }
        break;
    case RK_KEY: /* use root key */
        ret = crypto_aes_set_mode(dd, mode, op_mode, key_real_length / BYTES_TO_BITS, true);
        if (ret) {
            goto exit;
        }
        ret = rkek_cfg_init(dd, RK_KEY);
        if (ret) {
            goto exit;
        }
        break;
    case SSK_KEY: /* use ssk key */
        ret = crypto_aes_set_mode(dd, mode, op_mode, key_real_length / BYTES_TO_BITS, true);
        if (ret) {
            goto exit;
        }
        ret = rkek_cfg_init(dd, SSK_KEY);
        if (ret) {
            goto exit;
        }
        break;
    default:
        ret = -1;
        goto exit;
    }

    /* Process IV and XTS key2 here */
    switch (mode) {
    case AES_XTS_ALG:
        if (skey2 == NULL) {
            ret = -1;
            goto exit;
        }
        key_real_length = skey2->rijndael.Nr & ~(0x3);
        ret = crypto_aes_set_key2(dd, (uint8_t *)skey2->rijndael.eK, key_real_length / BYTES_TO_BITS);
        if (ret) {
            goto exit;
        }
        break;
    case AES_CBC_ALG:
    case AES_CTR_ALG:
        ret = crypto_aes_set_iv(dd, iv);
        if (ret != 0) {
            goto exit;
        }
        break;
    case AES_ECB_ALG:
        break;
    default:
        ret = -1;
        goto exit;
    }

    asr_cipher_write(dd, CRYPTO_AES_STREAM_SIZE_REG, blocks << 4);
    cipher_cache_operation((char *)in, blocks << 4);
    cipher_cache_operation((char *)out, blocks << 4);

    dma_output_start(dev_dd);
    udelay(1);
    crypto_aes_start(dd);
    udelay(1);
    dma_input_start(dev_dd);

    ret = dma_wait_output_finish(dev_dd);
    if (ret)
        goto exit;
    ret = crypto_aes_wait(dd);
    if (ret)
        goto exit;
    ret = dma_wait_input_finish(dev_dd);
    if (ret)
        goto exit;

    /* Process IV */
    switch (mode) {
    case AES_XTS_ALG:
    case AES_CBC_ALG:
    case AES_CTR_ALG:
        ret = crypto_aes_get_iv(dd, iv);
        if (ret != 0) {
            goto exit;
        }
        break;
    case AES_ECB_ALG:
        break;
    default:
        ret = -1;
        goto exit;
    }

    time_start = jiffies;
    /* make sure dma data has been transferred to DDR by checking that the last block of out changes */
    while (!memcmp(out + pos, tmp, 16)) {

        cipher_cache_operation(out + pos, 16);

        if ((jiffies - time_start) > 500) {
            dev_err(dd->dev, "Encryption: plaintext and ciphertext are still the same !!!");
            break;
        }
    }

exit:
    free_dma_chain(in_list);
    free_dma_chain(out_list);
    return ret;
}

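/*
 * Key-length encoding used by the helpers below: the low two bits of the
 * keylen/Nr value select the key source (EXT_KEY, RK_KEY or SSK_KEY), and
 * the remaining bits carry the key length in bits.
 */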
/* ciphers */
static int se_rijndael_setup_internal(const uint8_t *key, int keylen, symmetric_key *skey)
{
    int key_real_length;
    int hwkey_select;

    if (!skey || keylen <= 0) {
        return -1;
    }

    key_real_length = keylen & ~(0x3);
    hwkey_select = keylen & 0x3;
    switch (hwkey_select) {
    case EXT_KEY: /* use provided key */
        if ((!key) || (key_real_length > (int)(BYTES_TO_BITS * sizeof(skey->rijndael.eK)))
            || (key_real_length > (int)(BYTES_TO_BITS * sizeof(skey->rijndael.dK)))) {
            return -1;
        }
        memcpy(skey->rijndael.eK, key, key_real_length / BYTES_TO_BITS);
        memcpy(skey->rijndael.dK, key, key_real_length / BYTES_TO_BITS);
        break;
    case RK_KEY: /* use huk */
    case SSK_KEY: /* use ssk */
        skey->rijndael.Nr = keylen;
        break;
    default:
        return -1;
    }

    return 0;
}

static int se_rijndael_setup(const uint8_t *key, int keylen, symmetric_key *skey)
{
    return se_rijndael_setup_internal(key, (((keylen & ~0x3) * BYTES_TO_BITS) | (keylen & 0x3)), skey);
}

static int se_rijndael_ecb_decrypt(struct asr_bcm_cipher *dd, const uint8_t *ct, uint8_t *pt,
        const symmetric_key *skey)
{
    return aes_nblocks(dd, AES_DECRYPT_OP, ct, pt, 1, skey, NULL, AES_ECB_ALG, NULL);
}

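/*
 * Wrapper around aes_nblocks() that copes with buffers the DMA engine cannot
 * take directly: 4-byte-unaligned source/destination pointers, or lengths
 * that are not a multiple of 16, are bounced through zero-padded work buffers
 * and processed in WORK_BUF_SIZE chunks. For XTS, the returned IV is
 * ECB-decrypted in place with the second key between chunks.
 */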
static int _aes_handle_noalign(struct asr_bcm_cipher *dd, AES_OP_MODE_T op_mode,
        const uint8_t *in, uint8_t *out, uint32_t length,
        const symmetric_key *skey1, const symmetric_key *skey2,
        AES_MODE_T mode, uint8_t *iv)
{
    int ret = 0;
    uint32_t len_bytes = (length + 0xf) & (~0xf);
    uint8_t *in_cpy = NULL, *out_cpy = NULL;
    uint8_t *in_work = NULL, *out_work = NULL;
    uint8_t *aligned_buf_in = NULL, *aligned_buf_out = NULL;
    int size;

    if (((uint32_t)out & 0x3) || ((uint32_t)in & 0x3) || (len_bytes > length)) {
        in_cpy = (uint8_t *)in;
        out_cpy = (uint8_t *)out;

        /* if length is not a multiple of 16, zero padding */
        if (((uint32_t)in & 0x3) || (len_bytes > length)) {
            aligned_buf_in = kmalloc(min((int)len_bytes, WORK_BUF_SIZE), GFP_KERNEL);
            if (!aligned_buf_in)
                return -1;
            memset(aligned_buf_in, 0, min((int)len_bytes, WORK_BUF_SIZE));
        }

        if (((uint32_t)out & 0x3) || (len_bytes > length)) {
            aligned_buf_out = kmalloc(min((int)len_bytes, WORK_BUF_SIZE), GFP_KERNEL);
            if (!aligned_buf_out) {
                ret = -1;
                goto exit;
            }
        }

        while (len_bytes) {
            size = min((int)len_bytes, WORK_BUF_SIZE);

            if ((uint32_t)in & 0x3) {
                memcpy(aligned_buf_in, in_cpy, size);
                in_work = aligned_buf_in;
            } else {
                in_work = in_cpy;
            }

            if ((uint32_t)out & 0x3) {
                memset(aligned_buf_out, 0x0, size);
                out_work = aligned_buf_out;
            } else {
                out_work = out_cpy;
            }

            ret = aes_nblocks(dd, op_mode, in_work, out_work, size >> 4, skey1, skey2, mode, iv);
            if (ret)
                goto exit;

            if ((uint32_t)out & 0x3)
                memcpy(out_cpy, aligned_buf_out, size);

            if (mode == AES_XTS_ALG && len_bytes != 0 && (len_bytes > WORK_BUF_SIZE)) {
                symmetric_key *skey_local = kmalloc(sizeof(symmetric_key), GFP_KERNEL);
                if (!skey_local) {
                    ret = -1;
                    goto exit;
                }

                ret = se_rijndael_setup((uint8_t *)skey2->rijndael.eK,
                        (skey2->rijndael.Nr / BYTES_TO_BITS), skey_local);
                if (ret) {
                    kfree(skey_local);
                    goto exit;
                }

                ret = se_rijndael_ecb_decrypt(dd, iv, iv, skey_local);
                if (ret) {
                    kfree(skey_local);
                    goto exit;
                }

                kfree(skey_local);
            }

            out_cpy += size;
            in_cpy += size;
            len_bytes -= size;
        }
exit:
        if (aligned_buf_in)
            kfree(aligned_buf_in);
        if (aligned_buf_out)
            kfree(aligned_buf_out);
    } else {
        ret = aes_nblocks(dd, op_mode, in, out, len_bytes >> 4, skey1, skey2, mode, iv);
    }

    return ret;
}

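/*
 * Translate the flat key/mode parameters coming from the crypto framework
 * into the symmetric_key descriptors expected by _aes_handle_noalign(),
 * tagging Nr with the key source (external, RKEK or SSK) as described above.
 */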
static int aes_handle_noalign(struct asr_bcm_cipher *dd, AES_MODE_T mode, AES_OP_MODE_T op_mode, AES_KEY_SELECT_T key_select,
        const uint8_t *key1, uint32_t keylen1, const uint8_t *key2, uint32_t keylen2,
        const uint8_t *in, uint8_t *out, uint32_t size, uint8_t *iv)
{
    int ret;
    symmetric_key *pskey1, *pskey2;

    pskey1 = kmalloc(sizeof(symmetric_key), GFP_KERNEL);
    if (!pskey1) {
        return -1;
    }

    pskey2 = kmalloc(sizeof(symmetric_key), GFP_KERNEL);
    if (!pskey2) {
        kfree(pskey1);
        return -1;
    }

    memset(pskey1, 0, sizeof(symmetric_key));
    memset(pskey2, 0, sizeof(symmetric_key));

    if (op_mode == AES_ENCRYPT_OP) {
        pskey1->rijndael.eK = (uint32_t *)key1;
    } else if (op_mode == AES_DECRYPT_OP) {
        pskey1->rijndael.dK = (uint32_t *)key1;
    }

    if (key_select == EXT_KEY) {
        pskey1->rijndael.Nr = (keylen1 * BYTES_TO_BITS) & (~0x3);
    } else if (key_select == RK_KEY) {
        pskey1->rijndael.Nr = keylen1 * BYTES_TO_BITS | 0x1;
    } else if (key_select == SSK_KEY) {
        pskey1->rijndael.Nr = keylen1 * BYTES_TO_BITS | 0x2;
    } else {
        kfree(pskey1);
        kfree(pskey2);
        return -1;
    }

    if (mode == AES_XTS_ALG) {
        if (op_mode == AES_ENCRYPT_OP) {
            pskey2->rijndael.eK = (uint32_t *)key2;
            pskey2->rijndael.Nr = keylen2 * BYTES_TO_BITS;
        } else if (op_mode == AES_DECRYPT_OP) {
            pskey2->rijndael.dK = (uint32_t *)key2;
            pskey2->rijndael.Nr = keylen2 * BYTES_TO_BITS;
        }
        ret = _aes_handle_noalign(dd, op_mode, in, out, size, pskey1, pskey2, mode, iv);
    } else {
        ret = _aes_handle_noalign(dd, op_mode, in, out, size, pskey1, NULL, mode, iv);
    }

    kfree(pskey1);
    kfree(pskey2);
    return ret;
}

/* crypto framework */
static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_bcm_cipher *dd)
{
    struct skcipher_request *req = skcipher_request_cast(dd->areq);
    struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
    unsigned int ivsize = crypto_skcipher_ivsize(cipher);

    if (req->cryptlen < ivsize)
        return;

    if (rctx->mode & FLAGS_ENCRYPT) {
        scatterwalk_map_and_copy(req->iv, req->dst,
                req->cryptlen - ivsize, ivsize, 0);
    } else {
        if (req->src == req->dst)
            memcpy(req->iv, rctx->lastc, ivsize);
        else
            scatterwalk_map_and_copy(req->iv, req->src,
                    req->cryptlen - ivsize,
                    ivsize, 0);
    }
}

static int asr_cipher_complete(struct asr_bcm_cipher *dd, int err)
{
    struct asr_bcm_dev *bcm_dd = dev_get_drvdata(dd->dev);
    struct asr_bcm_ops *bcm_ops = bcm_dd->bcm_ops;

    dd->flags &= ~FLAGS_BUSY;

    asr_cipher_set_iv_as_last_ciphertext_block(dd);

    if (dd->is_async)
        dd->areq->complete(dd->areq, err);

    bcm_ops->dev_put(bcm_dd);

    tasklet_schedule(&dd->queue_task);

    return err;
}

static int asr_complete(struct asr_bcm_cipher *dd)
{
    return asr_cipher_complete(dd, 0);
}

static inline size_t asr_cipher_padlen(size_t len, size_t block_size)
{
    len &= block_size - 1;
    return len ? block_size - len : 0;
}

static int asr_cipher_buff_init(struct asr_bcm_cipher *dd, uint32_t len)
{
    dd->buf = (void *)__get_free_pages(GFP_KERNEL, get_order(len));

    if (!dd->buf) {
        dev_err(dd->dev, "unable to alloc pages.\n");
        return -ENOMEM;
    }

    dd->buflen = PAGE_SIZE << get_order(len);

    return 0;
}

static void asr_cipher_buff_cleanup(struct asr_bcm_cipher *dd, uint32_t len)
{
    free_pages((unsigned long)dd->buf, get_order(len));
    dd->buflen = 0;
}

static inline void asr_cipher_get(struct asr_bcm_cipher *dd)
{
    mutex_lock(&dd->cipher_lock);
}

static inline void asr_cipher_put(struct asr_bcm_cipher *dd)
{
    if (mutex_is_locked(&dd->cipher_lock))
        mutex_unlock(&dd->cipher_lock);
}

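/*
 * Core request handler: the scatterlist source is linearized into a
 * block-size-padded bounce buffer, encrypted/decrypted in place through
 * aes_handle_noalign(), and copied back to the destination scatterlist.
 * The cipher mutex serializes access to the engine for the whole request.
 */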
static int asr_sca_cipher_process(struct asr_bcm_cipher *dd,
        struct skcipher_request *req, asr_cipher_fn_t resume)
{
    int ret;
    size_t padlen = asr_cipher_padlen(req->cryptlen, CIPHER_BLOCK_SIZE);
    struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
    AES_MODE_T mode;
    AES_OP_MODE_T op_mode;
    AES_KEY_SELECT_T key_select;

    asr_cipher_get(dd);

    if (unlikely(req->cryptlen == 0)) {
        asr_cipher_put(dd);
        return -EINVAL;
    }

    dd->datalen = req->cryptlen + padlen;
    ret = asr_cipher_buff_init(dd, dd->datalen);
    if (ret) {
        asr_cipher_put(dd);
        return ret;
    }

    sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->cryptlen);

    dd->total = req->cryptlen;
    dd->real_dst = req->dst;
    dd->resume = resume;
    dd->data = (u32 *)dd->buf;

    if ((dd->flags & FLAGS_ENCRYPT))
        op_mode = AES_ENCRYPT_OP;
    else
        op_mode = AES_DECRYPT_OP;

    if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_ECB) {
        mode = AES_ECB_ALG;
    } else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CBC) {
        mode = AES_CBC_ALG;
    } else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CTR) {
        mode = AES_CTR_ALG;
    } else {
        asr_cipher_buff_cleanup(dd, dd->datalen);
        asr_cipher_put(dd);
        return -EINVAL;
    }

    if (rctx->use_rkek) {
        key_select = RK_KEY;
    } else {
        key_select = EXT_KEY;
    }

    ret = aes_handle_noalign(dd, mode, op_mode, key_select, (uint8_t *)dd->ctx->key,
            dd->ctx->keylen, NULL, 0, (const uint8_t *)dd->data, (uint8_t *)dd->data,
            dd->datalen, req->iv);
    if (ret)
        ret = -EINVAL;

    if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
                dd->buf, dd->total))
        ret = -EINVAL;

    asr_cipher_buff_cleanup(dd, dd->datalen);
    asr_cipher_put(dd);

    return asr_cipher_complete(dd, ret);
}

static inline void asr_cipher_set_mode(struct asr_bcm_cipher *dd,
        const struct asr_cipher_reqctx *rctx)
{
    /* Clear all but persistent flags and set request flags. */
    dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
}

static int asr_cipher_start(struct asr_bcm_cipher *dd)
{
    struct skcipher_request *req = skcipher_request_cast(dd->areq);
    struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
    struct asr_bcm_dev *bcm_dd = dev_get_drvdata(dd->dev);
    struct asr_bcm_ops *bcm_ops = bcm_dd->bcm_ops;

    bcm_ops->dev_get(bcm_dd);

    asr_cipher_set_mode(dd, rctx);
    return asr_sca_cipher_process(dd, req, asr_complete);
}

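/*
 * Standard crypto-queue pump: enqueue the new request and, if the engine is
 * idle, dequeue the next one, mark the device busy and call ctx->start().
 * A request is treated as asynchronous when it is not the one that was just
 * submitted by the caller.
 */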
static int asr_cipher_handle_queue(struct asr_bcm_cipher *dd,
        struct crypto_async_request *new_areq)
{
    struct crypto_async_request *areq, *backlog;
    struct asr_cipher_ctx *ctx;
    unsigned long flags;
    bool start_async;
    int err, ret = 0;

    spin_lock_irqsave(&dd->lock, flags);
    if (new_areq)
        ret = crypto_enqueue_request(&dd->queue, new_areq);
    if (dd->flags & FLAGS_BUSY) {
        spin_unlock_irqrestore(&dd->lock, flags);
        return ret;
    }

    backlog = crypto_get_backlog(&dd->queue);
    areq = crypto_dequeue_request(&dd->queue);
    if (areq) {
        dd->flags |= FLAGS_BUSY;
    }
    spin_unlock_irqrestore(&dd->lock, flags);
    if (!areq)
        return ret;

    if (backlog)
        backlog->complete(backlog, -EINPROGRESS);

    ctx = crypto_tfm_ctx(areq->tfm);
    dd->areq = areq;
    dd->ctx = ctx;
    start_async = (areq != new_areq);
    dd->is_async = start_async;

    /* WARNING: ctx->start() MAY change dd->is_async. */
    err = ctx->start(dd);
    return (start_async) ? ret : err;
}

static int asr_cipher(struct skcipher_request *req, unsigned long mode)
{
    int ret;
    struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
    struct asr_cipher_reqctx *rctx;

    ctx->block_size = CIPHER_BLOCK_SIZE;
    rctx = skcipher_request_ctx(req);
    rctx->mode = mode;
    rctx->use_rkek = ctx->use_rkek;

    if (!(mode & FLAGS_ENCRYPT) && (req->src == req->dst)) {
        unsigned int ivsize = crypto_skcipher_ivsize(cipher);

        if (req->cryptlen >= ivsize) {
            scatterwalk_map_and_copy(rctx->lastc, req->src,
                    req->cryptlen - ivsize,
                    ivsize, 0);
        }
    }

    ret = asr_cipher_handle_queue(ctx->dd, &req->base);

    asr_cipher_put(ctx->dd);
    return ret;
}

static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
        unsigned int keylen)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
    struct asr_bcm_cipher *dd = asr_cipher_local;

    ctx->dd = dd;
    ctx->use_rkek = false;

    if (keylen != AES_KEYSIZE_128 &&
        keylen != AES_KEYSIZE_192 &&
        keylen != AES_KEYSIZE_256) {
        crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    memcpy(ctx->key, key, keylen);
    ctx->keylen = keylen;

    return 0;
}

static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
        unsigned int keylen)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
    struct asr_bcm_cipher *dd = asr_cipher_local;

    ctx->dd = dd;
    if (!dd->rkek_burned)
        return -EPERM;

    ctx->use_rkek = true;

    if (keylen != AES_KEYSIZE_128 &&
        keylen != AES_KEYSIZE_192 &&
        keylen != AES_KEYSIZE_256) {
        crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    memcpy(ctx->key, key, keylen);
    ctx->keylen = keylen;

    return 0;
}

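/*
 * Read the lifecycle (LCS) field from the GEU fuse bank; more than one LCS
 * bit set is treated as "RKEK burned", which gates the hardware-keyed
 * algorithms registered below.
 */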
static int asr_cipher_rkek_fused(struct asr_bcm_cipher *dd)
{
#define GEU_KSTR_BANK6_LCS (0x0168)
#define GEU_KSTR_LCS_DM_BASE (3)
#define GEU_KSTR_LCS_MASK (0x7)

    uint32_t value;
    struct device_node *np;
    struct resource res;
    void __iomem *io_base;

    /* get geu node */
    np = of_find_compatible_node(NULL, NULL, "asr,asr-geu");
    if (!np) {
        dev_err(dd->dev, "can't find geu node to check rkek burned");
        return 0;
    }

    if (of_address_to_resource(np, 0, &res)) {
        dev_err(dd->dev, "can't find geu address");
        return 0;
    }

    io_base = ioremap(res.start, resource_size(&res));
    if (!io_base) {
        dev_err(dd->dev, "geu regs can't remap");
        return 0;
    }

    value = readl_relaxed(io_base + GEU_KSTR_BANK6_LCS);
    value >>= GEU_KSTR_LCS_DM_BASE;
    value &= GEU_KSTR_LCS_MASK;
    if (hweight32(value) > 1) {
        iounmap(io_base);
        return 1;
    }

    iounmap(io_base);
    return 0;
}

static int asr_aes_ecb_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_ECB | FLAGS_ENCRYPT);
}

static int asr_aes_ecb_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_ECB);
}

static int asr_aes_cbc_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CBC | FLAGS_ENCRYPT);
}

static int asr_aes_cbc_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CBC);
}

static int asr_aes_ctr_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CTR | FLAGS_ENCRYPT);
}

static int asr_aes_ctr_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CTR);
}

static int asr_cipher_init(struct crypto_skcipher *tfm)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

    tfm->reqsize = sizeof(struct asr_cipher_reqctx);
    ctx->start = asr_cipher_start;

    return 0;
}

static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct asr_bcm_cipher *dd = asr_cipher_local;

    if (!dd->rkek_burned)
        return -EPERM;

    tfm->reqsize = sizeof(struct asr_cipher_reqctx);
    ctx->start = asr_cipher_start;

    return 0;
}

static void asr_cipher_exit(struct crypto_skcipher *tfm)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

    memset(ctx, 0, sizeof(*ctx));
}

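/*
 * skcipher algorithms exposed by this driver. A kernel user reaches them
 * through the generic crypto API; a minimal sketch (not part of this driver,
 * error handling trimmed) might look like:
 *
 *   struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *   struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *   crypto_skcipher_setkey(tfm, key, 16);
 *   skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 *   skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *   crypto_skcipher_encrypt(req);
 *   skcipher_request_free(req);
 *   crypto_free_skcipher(tfm);
 *
 * The "*-hwkey" variants still take a key (its length selects AES-128/192/256),
 * but the key material itself comes from the fused RKEK selected in hardware.
 */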
static struct skcipher_alg cipher_algs[] = {
    /* AES - ECB */
    {
        .base = {
            .cra_name = "ecb(aes)",
            .cra_driver_name = "asr-ecb-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_aes_ecb_encrypt,
        .decrypt = asr_aes_ecb_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
    },
    /* AES - CBC */
    {
        .base = {
            .cra_name = "cbc(aes)",
            .cra_driver_name = "asr-cbc-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_aes_cbc_encrypt,
        .decrypt = asr_aes_cbc_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
        .ivsize = AES_BLOCK_SIZE,
    },
    /* AES - CTR */
    {
        .base = {
            .cra_name = "ctr(aes)",
            .cra_driver_name = "asr-ctr-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_aes_ctr_encrypt,
        .decrypt = asr_aes_ctr_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
        .ivsize = AES_BLOCK_SIZE,
    },

    /* hardware key AES - ECB */
    {
        .base = {
            .cra_name = "ecb(aes-hwkey)",
            .cra_driver_name = "asr-ecb-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_set_hwkey,
        .encrypt = asr_aes_ecb_encrypt,
        .decrypt = asr_aes_ecb_decrypt,
        .init = asr_cipher_hwkey_init,
        .exit = asr_cipher_exit,
    },
    /* hardware key AES - CBC */
    {
        .base = {
            .cra_name = "cbc(aes-hwkey)",
            .cra_driver_name = "asr-cbc-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_set_hwkey,
        .encrypt = asr_aes_cbc_encrypt,
        .decrypt = asr_aes_cbc_decrypt,
        .init = asr_cipher_hwkey_init,
        .exit = asr_cipher_exit,
        .ivsize = AES_BLOCK_SIZE,
    },
};

static void asr_cipher_queue_task(unsigned long data)
{
    struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;

    asr_cipher_handle_queue(dd, NULL);
}

static void asr_cipher_done_task(unsigned long data)
{
    struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;

    dd->is_async = true;
    (void)dd->resume(dd);
}

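/*
 * Called by the parent asr-bcm driver: wire this sub-device to the shared
 * MMIO region, check the RKEK fuse state, set up the request queue and
 * tasklets, and register the skcipher algorithms (unwinding on failure).
 */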
int asr_bcm_cipher_register(struct asr_bcm_dev *bcm_dd)
{
    int err, i, j;
    struct device_node *np = NULL;
    struct asr_bcm_cipher *cipher_dd;

    cipher_dd = &bcm_dd->asr_cipher;
    cipher_dd->dev = bcm_dd->dev;
    cipher_dd->io_base = bcm_dd->io_base;
    cipher_dd->phys_base = bcm_dd->phys_base;

    np = cipher_dd->dev->of_node;

    cipher_dd->rkek_burned = asr_cipher_rkek_fused(cipher_dd);

    asr_cipher_local = cipher_dd;

    spin_lock_init(&cipher_dd->lock);
    mutex_init(&cipher_dd->cipher_lock);
    tasklet_init(&cipher_dd->done_task, asr_cipher_done_task,
            (unsigned long)cipher_dd);
    tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
            (unsigned long)cipher_dd);
    crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);

    for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
        err = crypto_register_skcipher(&cipher_algs[i]);
        if (err) {
            for (j = 0; j < i; j++)
                crypto_unregister_skcipher(&cipher_algs[j]);
            return err;
        }
    }

    return 0;
}
EXPORT_SYMBOL_GPL(asr_bcm_cipher_register);

int asr_bcm_cipher_unregister(struct asr_bcm_dev *bcm_dd)
{
    int i;
    struct asr_bcm_cipher *cipher_dd = &bcm_dd->asr_cipher;

    for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
        crypto_unregister_skcipher(&cipher_algs[i]);

    tasklet_kill(&cipher_dd->done_task);
    tasklet_kill(&cipher_dd->queue_task);

    return 0;
}
EXPORT_SYMBOL_GPL(asr_bcm_cipher_unregister);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
MODULE_DESCRIPTION("ASR bcm cipher driver");