// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023 ASR Micro Limited
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#ifdef CONFIG_TEE
#include <linux/tee_drv.h>
#endif
#include <linux/crypto.h>
#include <linux/cputype.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include "asr-bcm-optee.h"
#include "asr-cipher-optee.h"

struct asr_bcm_cipher *asr_cipher_local;

static struct teec_uuid pta_cipher_uuid = ASR_AES_ACCESS_UUID;

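/*
 * Query the RKEK (hardware root key) provisioning state from the cipher
 * PTA in OP-TEE. The state is returned through a single VALUE_OUTPUT
 * parameter of the CMD_AES_HWKEY_STATUS invocation.
 */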
static int asr_optee_cipher_get_rkek_state(u32 *state)
{
	struct tee_ioctl_invoke_arg invoke_arg;
	struct tee_param params[1];
	struct asrbcm_tee_context asrbcm_tee_ctx;
	int ret = 0;

	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
	if (ret != 0)
		return ret;

	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
	invoke_arg.func = CMD_AES_HWKEY_STATUS;
	invoke_arg.session = asrbcm_tee_ctx.session;
	invoke_arg.num_params = 1;

	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
	params[0].u.value.a = 0;
	params[0].u.value.b = 0;
	params[0].u.value.c = 0;

	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
	if (ret != 0) {
		goto exit;
	} else if (invoke_arg.ret != 0) {
		ret = -EIO;
		goto exit;
	}

	*state = params[0].u.value.a;

exit:
	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
	return ret;
}

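/*
 * Run one AES operation with a caller-supplied key in the cipher PTA.
 * The shared memory buffer is laid out as [src | dst | key | iv]:
 * params[0]/params[1] reference the src/dst regions, params[2].value.a
 * carries the encrypt/decrypt flag, and params[3] references the key
 * (followed by the IV for CBC) region.
 */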
static int asr_optee_cipher_process(uint32_t cipher_mode, uint32_t op_mode,
				    struct scatterlist *src, struct scatterlist *dst,
				    size_t len, uint32_t key_size, u8 *key,
				    u8 *iv, uint32_t ivsize)
{
	struct tee_ioctl_invoke_arg invoke_arg;
	struct tee_param params[4];
	struct asrbcm_tee_context asrbcm_tee_ctx;
	struct tee_shm *shm;
	int ret = 0;
	char *ma = NULL;
	uint32_t srclen = len, dstlen = len, paralen_a = key_size, paralen_b = ivsize;
	uint8_t *parabuf_a = key, *parabuf_b = iv;

	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
	if (ret != 0)
		return ret;

	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
	invoke_arg.func = cipher_mode;
	invoke_arg.session = asrbcm_tee_ctx.session;

	shm = tee_shm_alloc(asrbcm_tee_ctx.tee_ctx, srclen + dstlen + paralen_a + paralen_b,
			    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm)) {
		ret = PTR_ERR(shm);
		goto exit;
	}

	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	params[0].u.memref.shm_offs = 0;
	params[0].u.memref.size = srclen;
	params[0].u.memref.shm = shm;

	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
	params[1].u.memref.shm_offs = srclen;
	params[1].u.memref.size = dstlen;
	params[1].u.memref.shm = shm;

	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	params[2].u.value.a = op_mode;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		ret = PTR_ERR(ma);
		goto free_shm;
	}
	sg_copy_to_buffer(src, sg_nents(src), ma, srclen);
	memcpy(ma + srclen + dstlen, parabuf_a, paralen_a);

	if (parabuf_b && paralen_b) {
		/* CBC: pass the key followed by the IV */
		memcpy(ma + srclen + dstlen + paralen_a, parabuf_b, paralen_b);
		params[2].u.value.b = paralen_a;
		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
		params[3].u.memref.shm_offs = srclen + dstlen;
		params[3].u.memref.size = paralen_a + paralen_b;
		params[3].u.memref.shm = shm;
		invoke_arg.num_params = 4;
	} else {
		/* ECB: pass the key only, no IV */
		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
		params[3].u.memref.shm_offs = srclen + dstlen;
		params[3].u.memref.size = paralen_a;
		params[3].u.memref.shm = shm;
		invoke_arg.num_params = 4;
	}

	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
	if (ret != 0) {
		goto free_shm;
	} else if (invoke_arg.ret != 0) {
		ret = -EIO;
		goto free_shm;
	}
	sg_copy_from_buffer(dst, sg_nents(dst), ma + srclen, dstlen);

free_shm:
	tee_shm_free(shm);
exit:
	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
	return ret;
}

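/*
 * Same as asr_optee_cipher_process(), but the AES key never leaves the
 * secure world: only the key length is passed in params[2].value.a and
 * the PTA uses the fused RKEK internally. The shared memory layout is
 * [src | dst | iv], with the IV parameter present only for CBC.
 */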
static int asr_optee_cipher_hwkey_process(uint32_t cipher_mode, uint32_t op_mode,
					  struct scatterlist *src, struct scatterlist *dst,
					  size_t len, uint32_t key_size,
					  u8 *iv, uint32_t ivsize)
{
	struct tee_ioctl_invoke_arg invoke_arg;
	struct tee_param params[4];
	struct asrbcm_tee_context asrbcm_tee_ctx;
	struct tee_shm *shm;
	int ret = 0;
	char *ma = NULL;
	uint32_t srclen = len, dstlen = len, paralen = ivsize;
	uint8_t *parabuf = iv;

	ret = asrbcm_optee_open_ta(&asrbcm_tee_ctx, &pta_cipher_uuid);
	if (ret != 0)
		return ret;

	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
	invoke_arg.func = cipher_mode;
	invoke_arg.session = asrbcm_tee_ctx.session;

	shm = tee_shm_alloc(asrbcm_tee_ctx.tee_ctx, srclen + dstlen + paralen,
			    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm)) {
		ret = PTR_ERR(shm);
		goto exit;
	}

	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	params[0].u.memref.shm_offs = 0;
	params[0].u.memref.size = srclen;
	params[0].u.memref.shm = shm;

	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
	params[1].u.memref.shm_offs = srclen;
	params[1].u.memref.size = dstlen;
	params[1].u.memref.shm = shm;

	params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	params[2].u.value.a = key_size;
	params[2].u.value.b = op_mode;
	params[2].u.value.c = 0;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		ret = PTR_ERR(ma);
		goto free_shm;
	}
	sg_copy_to_buffer(src, sg_nents(src), ma, srclen);
	if (parabuf && paralen) {
		/* CBC: pass the IV after the src/dst regions */
		params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
		params[3].u.memref.shm_offs = srclen + dstlen;
		params[3].u.memref.size = paralen;
		params[3].u.memref.shm = shm;
		memcpy(ma + srclen + dstlen, parabuf, paralen);
		invoke_arg.num_params = 4;
	} else {
		invoke_arg.num_params = 3;
	}

	ret = tee_client_invoke_func(asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
	if (ret != 0) {
		goto free_shm;
	} else if (invoke_arg.ret != 0) {
		ret = -EIO;
		goto free_shm;
	}
	sg_copy_from_buffer(dst, sg_nents(dst), ma + srclen, dstlen);

free_shm:
	tee_shm_free(shm);
exit:
	asrbcm_optee_close_ta(&asrbcm_tee_ctx);
	return ret;
}

static inline void asr_cipher_set_mode(struct asr_bcm_cipher *dd,
				       const struct asr_cipher_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
}

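/*
 * After a CBC operation completes, copy the last ciphertext block back
 * into req->iv so chained skcipher requests see the expected IV. For
 * in-place decryption the block was saved in rctx->lastc before the
 * source buffer was overwritten.
 */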
static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_bcm_cipher *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & FLAGS_ENCRYPT) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	} else {
		if (req->src == req->dst)
			memcpy(req->iv, rctx->lastc, ivsize);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}
}

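/*
 * Enqueue the new request (if any) and, when the engine is idle, pull
 * the next request off the crypto queue, mark the device busy and start
 * it via ctx->start().
 */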
static int asr_cipher_handle_queue(struct asr_bcm_cipher *dd,
				   struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct asr_cipher_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	start_async = (areq != new_areq);
	dd->is_async = start_async;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

static inline int asr_cipher_complete(struct asr_bcm_cipher *dd, int err)
{
	dd->flags &= ~FLAGS_BUSY;

	asr_cipher_set_iv_as_last_ciphertext_block(dd);

	if (dd->is_async)
		dd->areq->complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}

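/*
 * Translate the request flags into a PTA command (ECB/CBC, software key
 * or RKEK), hand the whole request to OP-TEE in one shot and complete
 * the request with the result.
 */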
static int asr_cipher_start(struct asr_bcm_cipher *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	u8 *iv;
	u32 flags, cipher_mode, op_mode, keylen, ivsize;
	int err;

	asr_cipher_set_mode(dd, rctx);

	flags = dd->flags;

	if ((flags & FLAGS_OPMODE_MASK) == FLAGS_CBC) {
		if (rctx->use_rkek)
			cipher_mode = CMD_AES_HWKEY_CBC;
		else
			cipher_mode = CMD_AES_CBC;
		ivsize = crypto_skcipher_ivsize(cipher);
		iv = req->iv;
	} else {
		iv = NULL;
		ivsize = 0;
		if (rctx->use_rkek)
			cipher_mode = CMD_AES_HWKEY_ECB;
		else
			cipher_mode = CMD_AES_ECB;
	}

	if (flags & FLAGS_ENCRYPT)
		op_mode = 1;
	else
		op_mode = 0;

	keylen = dd->ctx->keylen;

	if (rctx->use_rkek) {
		err = asr_optee_cipher_hwkey_process(cipher_mode, op_mode, req->src,
						     req->dst, req->cryptlen, keylen,
						     iv, ivsize);
	} else {
		err = asr_optee_cipher_process(cipher_mode, op_mode, req->src,
					       req->dst, req->cryptlen, keylen,
					       (u8 *)dd->ctx->key, iv, ivsize);
	}

	return asr_cipher_complete(dd, err);
}

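/*
 * Common entry point for the encrypt/decrypt callbacks. When the mode
 * flags indicate an in-place decryption, the last input block is saved
 * in rctx->lastc before it is overwritten, so the IV can still be
 * propagated by asr_cipher_set_iv_as_last_ciphertext_block().
 */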
static int asr_cipher(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct asr_cipher_reqctx *rctx;
	struct asr_bcm_cipher *dd = asr_cipher_local;

	ctx->block_size = AES_BLOCK_SIZE;
	ctx->dd = dd;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;
	rctx->use_rkek = ctx->use_rkek;

	if (!(mode) && (req->src == req->dst)) {
		unsigned int ivsize = crypto_skcipher_ivsize(cipher);

		if (req->cryptlen >= ivsize) {
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

	return asr_cipher_handle_queue(dd, &req->base);
}

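/*
 * Software-key setkey: validate the AES key length and cache the key in
 * the transform context; it is copied into shared memory per request.
 */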
static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct asr_bcm_cipher *dd = asr_cipher_local;

	ctx->dd = dd;
	ctx->use_rkek = false;

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

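/*
 * Hardware-key setkey: the supplied key material is ignored and only the
 * key length is recorded. Requests on this transform are processed with
 * the fused RKEK inside OP-TEE, so setkey fails if the RKEK is not burned.
 */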
static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
				unsigned int keylen)
{
	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct asr_bcm_cipher *dd = asr_cipher_local;

	(void)key; /* ignore the sw key */

	if (!dd->rkek_burned)
		return -EPERM;

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->dd = dd;
	ctx->use_rkek = true; /* route requests through the RKEK path */
	ctx->keylen = keylen;

	return 0;
}

static int asr_aes_ecb_encrypt(struct skcipher_request *req)
{
	return asr_cipher(req, FLAGS_ECB | FLAGS_ENCRYPT);
}

static int asr_aes_ecb_decrypt(struct skcipher_request *req)
{
	return asr_cipher(req, FLAGS_ECB);
}

static int asr_aes_cbc_encrypt(struct skcipher_request *req)
{
	return asr_cipher(req, FLAGS_CBC | FLAGS_ENCRYPT);
}

static int asr_aes_cbc_decrypt(struct skcipher_request *req)
{
	return asr_cipher(req, FLAGS_CBC);
}

static int asr_cipher_init(struct crypto_skcipher *tfm)
{
	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
	ctx->start = asr_cipher_start;

	return 0;
}

static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
{
	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct asr_bcm_cipher *dd = asr_cipher_local;

	if (!dd->rkek_burned)
		return -EPERM;

	tfm->reqsize = sizeof(struct asr_cipher_reqctx);
	ctx->start = asr_cipher_start;

	return 0;
}

static void asr_cipher_exit(struct crypto_skcipher *tfm)
{
	struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void asr_cipher_queue_task(unsigned long data)
{
	struct asr_bcm_cipher *dd = (struct asr_bcm_cipher *)data;

	asr_cipher_handle_queue(dd, NULL);
}

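/*
 * Four skcipher algorithms are exposed: ecb(aes)/cbc(aes) backed by a
 * caller-supplied key, and ecb(aes-hwkey)/cbc(aes-hwkey) backed by the
 * fused RKEK that never leaves the secure world.
 */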
static struct skcipher_alg cipher_algs[] = {
	/* AES - ECB, using input key */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "asr-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = asr_cipher_setkey,
		.encrypt = asr_aes_ecb_encrypt,
		.decrypt = asr_aes_ecb_decrypt,
		.init = asr_cipher_init,
		.exit = asr_cipher_exit,
	},
	/* AES - CBC, using input key */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "asr-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = asr_cipher_setkey,
		.encrypt = asr_aes_cbc_encrypt,
		.decrypt = asr_aes_cbc_decrypt,
		.init = asr_cipher_init,
		.exit = asr_cipher_exit,
		.ivsize = AES_BLOCK_SIZE,
	},
	/* AES - ECB, using hardware key, a.k.a. RKEK */
	{
		.base = {
			.cra_name = "ecb(aes-hwkey)",
			.cra_driver_name = "asr-ecb-aes-hwkey",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = asr_cipher_set_hwkey,
		.encrypt = asr_aes_ecb_encrypt,
		.decrypt = asr_aes_ecb_decrypt,
		.init = asr_cipher_hwkey_init,
		.exit = asr_cipher_exit,
	},
	/* AES - CBC, using hardware key, a.k.a. RKEK */
	{
		.base = {
			.cra_name = "cbc(aes-hwkey)",
			.cra_driver_name = "asr-cbc-aes-hwkey",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct asr_cipher_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = asr_cipher_set_hwkey,
		.encrypt = asr_aes_cbc_encrypt,
		.decrypt = asr_aes_cbc_decrypt,
		.init = asr_cipher_hwkey_init,
		.exit = asr_cipher_exit,
		.ivsize = AES_BLOCK_SIZE,
	},
};

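/*
 * Probe-time registration: query the RKEK provisioning state from
 * OP-TEE, set up the request queue and tasklet, then register the
 * skcipher algorithms, unwinding any partial registration on failure.
 */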
int asr_bcm_cipher_register(struct asr_bcm_dev *bcm_dd)
{
	int i, j, err;
	struct asr_bcm_cipher *cipher_dd;
	struct device *dev = bcm_dd->dev;
	u32 rkek_state;

	cipher_dd = &bcm_dd->asr_cipher;
	cipher_dd->dev = bcm_dd->dev;

	asr_cipher_local = cipher_dd;

	err = asr_optee_cipher_get_rkek_state(&rkek_state);
	if (err) {
		dev_warn(dev, "can't get hwkey(rkek) state\n");
		cipher_dd->rkek_burned = 0;
	} else {
		if (rkek_state)
			cipher_dd->rkek_burned = 1;
		else
			cipher_dd->rkek_burned = 0;
		switch (rkek_state) {
		case 2:
			dev_warn(dev, "hwkey(rkek) burned, SW access not disabled\n");
			break;
		case 1:
			dev_warn(dev, "hwkey(rkek) burned, SW access disabled\n");
			break;
		case 0:
			dev_warn(dev, "hwkey(rkek) not burned\n");
			break;
		}
	}

	spin_lock_init(&cipher_dd->lock);
	tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
		     (unsigned long)cipher_dd);

	crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);

	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
		err = crypto_register_skcipher(&cipher_algs[i]);
		if (err) {
			for (j = 0; j < i; j++)
				crypto_unregister_skcipher(&cipher_algs[j]);
			return err;
		}
	}

	return 0;
}

int asr_bcm_cipher_unregister(struct asr_bcm_dev *bcm_dd)
{
	int i;
	struct asr_bcm_cipher *cipher_dd = &bcm_dd->asr_cipher;

	for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
		crypto_unregister_skcipher(&cipher_algs[i]);

	tasklet_kill(&cipher_dd->queue_task);

	/* cipher_dd is embedded in bcm_dd, so there is nothing to free here */

	return 0;
}

MODULE_DESCRIPTION("ASR HWKey CIPHER driver with optee-os.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yonggan Wang");