#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cputype.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <linux/jiffies.h>
#include <crypto/aes.h>
#include <crypto/sm4.h>
#include <crypto/internal/skcipher.h>
#include "asr-te200.h"
#include "asr-cipher.h"

#define CIPHER_BLOCK_SIZE AES_BLOCK_SIZE
#define CIPHER_MIN_KEY_SIZE AES_MIN_KEY_SIZE
#define CIPHER_MAX_KEY_SIZE AES_MAX_KEY_SIZE

static struct asr_te200_cipher *asr_cipher_local = NULL;

static inline u32 asr_cipher_read(struct asr_te200_cipher *dd, u32 offset)
{
    u32 value = readl_relaxed(dd->io_base + offset);

    return value;
}

static inline void asr_cipher_write(struct asr_te200_cipher *dd,
                    u32 offset, u32 value)
{
    writel_relaxed(value, dd->io_base + offset);
}

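/* Gate or ungate the SCA engine clock via the TE200 clock control register. */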
static int sca_clock_switch(struct asr_te200_cipher *dd, int enable)
{
    uint32_t value;

    value = asr_cipher_read(dd, TE200_CLOCK_CTRL);
    if (enable) {
        value |= SCA_CLK_EN;
    } else {
        value &= ~SCA_CLK_EN;
    }
    asr_cipher_write(dd, TE200_CLOCK_CTRL, value);
    return 0;
}

static int sca_start_run(struct asr_te200_cipher *dd)
{
    uint32_t value;

    value = asr_cipher_read(dd, TE200_SSCA_CTRL);
    value |= SCA_RUN;
    asr_cipher_write(dd, TE200_SSCA_CTRL, value);
    return 0;
}

static int sca_set_alg(int alg_type, uint32_t *value)
{
    switch (alg_type) {
    case NORMAL_AES:
        *value &= SCA_NORMAL_AES;
        break;
    case SM4:
        *value |= SCA_SM4;
        break;
    default:
        return -1;
    }
    return 0;
}

static int sca_set_cipher_mode(int mode, uint32_t *value)
{
    switch (mode) {
    case ECB:
        *value &= SCA_MODE_ECB;
        break;
    case CTR:
        *value |= SCA_MODE_CTR;
        break;
    case CBC:
        *value |= SCA_MODE_CBC;
        break;
    default:
        return -1;
    }
    return 0;
}

static int sca_set_iv(const uint8_t *iv, uint32_t *value)
{
    if (iv) {
        *value |= SCA_SET_IV | SCA_SET_IV_ADDR;
    } else {
        *value &= ~(SCA_SET_IV | SCA_SET_IV_ADDR);
    }
    return 0;
}

static int sca_set_key(const uint8_t *key, uint32_t key_len, uint32_t *value)
{
    switch (key_len) {
    case 16:
        *value &= SCA_KEY_128_BITS;
        break;
    case 24:
        *value |= SCA_KEY_192_BITS;
        break;
    case 32:
        *value |= SCA_KEY_256_BITS;
        break;
    default:
        return -1;
    }

    if (key) {
        *value |= SCA_EXTERNAL_KEY | SCA_KEY_IS_ADDR;
    } else {
        *value |= SCA_DEVICE_ROOT_KEY | SCA_KEY_IS_ADDR;
    }

    return 0;
}

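/*
 * Poll TE200_SSCA_INTR_STAT until the queued command completes (SCA_CMD_INTR)
 * or an error/timeout is detected, then acknowledge the command interrupt by
 * writing the status bit back.  Returns 0 on success, -1 on error or timeout.
 */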
static int sca_wait_intr(struct asr_te200_cipher *dd)
{
    int ret = 0;
    uint32_t value;
    unsigned long time_start;

    time_start = jiffies;
    value = asr_cipher_read(dd, TE200_SSCA_INTR_STAT);

    while (1) {
        value = asr_cipher_read(dd, TE200_SSCA_INTR_STAT);

        if (value & SCA_INVALID_CMD) {
            dev_err(dd->dev, "invalid cmd\n");
            ret = -1;
            break;
        }

        if (value & SCA_INVALID_KEY) {
            dev_err(dd->dev, "invalid key\n");
            ret = -1;
            break;
        }

        if (value & SCA_BUS_ERROR) {
            dev_err(dd->dev, "bus err\n");
            ret = -1;
            break;
        }

        if ((jiffies - time_start) > 500) {
            dev_err(dd->dev, "wait intr timeout!\n");
            ret = -1;
            break;
        }

        if (value & SCA_CMD_INTR) {
            break;
        }
    }

    /* acknowledge the command-done interrupt */
    value = asr_cipher_read(dd, TE200_SSCA_INTR_STAT);
    value |= SCA_CMD_INTR;
    asr_cipher_write(dd, TE200_SSCA_INTR_STAT, value);
    return ret;
}

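/*
 * The SCA engine is handed physical addresses for keys, IVs and data, so
 * every buffer passed to it must first be flushed from the CPU data cache.
 */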
static inline void cipher_cache_operation(void *addr, int size)
{
    __cpuc_flush_dcache_area(addr, size);
}

/* must stay in sync with the key ladder used by the tos/uboot/kernel te200 drivers */
static const struct {
    __attribute__((aligned(16))) uint8_t ek3[16];
    __attribute__((aligned(16))) uint8_t ek2[16];
    __attribute__((aligned(16))) uint8_t ek1[16];
} key_ladder = {
    { 0x50, 0xCF, 0x0F, 0x29, 0xD1, 0xCF, 0x32, 0x41, 0xC5, 0x64, 0xAC, 0xDB, 0xDD, 0x9A, 0xFC, 0xF4 },
    { 0x9C, 0xAB, 0x04, 0x57, 0xB7, 0x17, 0xD9, 0x4A, 0x34, 0x74, 0x28, 0x30, 0x34, 0x16, 0x3B, 0x52 },
    { 0xF5, 0xA0, 0x33, 0x7B, 0x4B, 0xE8, 0x18, 0x84, 0x51, 0x4E, 0x38, 0x86, 0x6D, 0x08, 0xBB, 0x6E },
};

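/*
 * Prepare use of the OTP root key (RKEK): toggle the OTP clock, select the
 * OTP key through the CIU SYS_SEC_CTRL0 register, then lock the selection
 * via TE200_OTP_DUMMY_CFG.
 */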
static int rkek_cfg_init(struct asr_te200_cipher *dd)
{
#define SYS_SEC_CTRL0 (0x0C)

    uint32_t value;
    struct device_node *np;
    struct resource res;
    void __iomem *io_base;

    value = asr_cipher_read(dd, TE200_CLOCK_CTRL);
    value &= ~OTP_CLK_EN;
    asr_cipher_write(dd, TE200_CLOCK_CTRL, value);

    value = asr_cipher_read(dd, TE200_CLOCK_CTRL);
    value |= OTP_CLK_EN;
    asr_cipher_write(dd, TE200_CLOCK_CTRL, value);

    /* set otp key sel */
    np = of_find_compatible_node(NULL, NULL, "marvell,mmp-ciu");
    if (!np) {
        dev_err(dd->dev, "can't find ciu node to set otp key sel");
        return -1;
    }

    if (of_address_to_resource(np, 0, &res)) {
        return -1;
    }

    io_base = ioremap(res.start, resource_size(&res));
    if (!io_base) {
        dev_err(dd->dev, "ciu regs can't remap");
        return -1;
    }

    value = readl_relaxed(io_base + SYS_SEC_CTRL0);
    value |= (1 << 24);
    writel_relaxed(value, io_base + SYS_SEC_CTRL0);

    /* enable lock */
    value = asr_cipher_read(dd, TE200_OTP_DUMMY_CFG);
    value |= 0x10;
    asr_cipher_write(dd, TE200_OTP_DUMMY_CFG, value);

    iounmap(io_base);
    return 0;
}

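/*
 * Queue an INIT command: select the algorithm, block mode, key source and
 * (for CBC/CTR) the IV, then push the key and IV physical addresses as
 * command parameters.  When no external key is given, the three-level
 * key ladder (ek3/ek2/ek1) derived from the RKEK is used instead.
 */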
static int sca_cipher_init(struct asr_te200_cipher *dd, int alg_type, int mode,
               const uint8_t *iv, const uint8_t *key, uint32_t key_len)
{
    int ret;
    uint32_t cmd = 0;
    uint32_t param;
    uint32_t key_phys;
    uint32_t iv_phys;

    sca_start_run(dd);

    ret = sca_set_alg(alg_type, &cmd);
    if (ret) {
        return -1;
    }

    ret = sca_set_cipher_mode(mode, &cmd);
    if (ret) {
        return -1;
    }

    ret = sca_set_key(key, key_len, &cmd);
    if (ret) {
        return -1;
    }

    if (iv && ((mode == CBC) || (mode == CTR))) {
        ret = sca_set_iv(iv, &cmd);
        if (ret) {
            return -1;
        }
    }

    cmd |= SCA_INTER_TRIGGERD;
    cmd |= SCA_INIT_CMD;

    asr_cipher_write(dd, TE200_SSCA_QUEUE, cmd);

    /* set key params */
    if (key) {
        key_phys = virt_to_phys((void *)key);
        cipher_cache_operation((void *)key, key_len);
        param = (uint32_t)key_phys;
        asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
    } else { /* use rkek */
        key_phys = virt_to_phys((void *)key_ladder.ek3);
        cipher_cache_operation((void *)key_ladder.ek3, key_len);
        param = (uint32_t)key_phys;
        asr_cipher_write(dd, TE200_SSCA_QUEUE, param);

        key_phys = virt_to_phys((void *)key_ladder.ek2);
        cipher_cache_operation((void *)key_ladder.ek2, key_len);
        param = (uint32_t)key_phys;
        asr_cipher_write(dd, TE200_SSCA_QUEUE, param);

        key_phys = virt_to_phys((void *)key_ladder.ek1);
        cipher_cache_operation((void *)key_ladder.ek1, key_len);
        param = (uint32_t)key_phys;
        asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
    }

    /* set iv params */
    if (iv && ((mode == CBC) || (mode == CTR))) {
        /* set iv addr */
        iv_phys = virt_to_phys((void *)iv);
        cipher_cache_operation((void *)iv, 16);
        param = (uint32_t)iv_phys;
        asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
    }

    return sca_wait_intr(dd);
}

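/*
 * Queue a PROCESS command: direction (encrypt/decrypt), interrupt trigger and
 * last-session flag, followed by the source physical address, the data length
 * and the destination physical address.  Source and destination buffers are
 * cache-flushed to the 16-byte padded length before the engine runs.
 */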
static int sca_cipher_process(struct asr_te200_cipher *dd, int encrypt,
                  int last_one, const void *in, uint32_t size, void *out)
{
    uint32_t cmd = 0;
    uint32_t param;
    uint8_t *psrc = (uint8_t *)in;
    uint8_t *pdst = (uint8_t *)out;
    uint32_t in_phys, out_phys;
    uint32_t len;

    len = (size + 0xf) & (~0xf);

    /* set encrypt or decrypt */
    if (encrypt) {
        cmd |= SCA_ENCRYPTION;
    } else {
        cmd &= (~SCA_ENCRYPTION);
    }

    cmd |= SCA_INTER_TRIGGERD;
    cmd |= SCA_PROCESS_CMD;
    if (last_one) {
        cmd |= SCA_LAST_ONE_SESSION;
    } else {
        cmd &= ~SCA_LAST_ONE_SESSION;
    }
    asr_cipher_write(dd, TE200_SSCA_QUEUE, cmd);

    in_phys = (uint32_t)virt_to_phys((void *)psrc);
    out_phys = (uint32_t)virt_to_phys((void *)pdst);
    cipher_cache_operation((void *)psrc, len);
    cipher_cache_operation((void *)pdst, len);

    /* set src addr */
    param = (uint32_t)in_phys;
    asr_cipher_write(dd, TE200_SSCA_QUEUE, param);

    /* set data length */
    param = (uint32_t)size;
    asr_cipher_write(dd, TE200_SSCA_QUEUE, param);

    /* set dst addr */
    if (out_phys) {
        param = (uint32_t)out_phys;
        asr_cipher_write(dd, TE200_SSCA_QUEUE, param);
    }

    sca_start_run(dd);
    return sca_wait_intr(dd);
}

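/* Queue a FINISH command to close the current SCA session. */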
static int sca_cipher_finish(struct asr_te200_cipher *dd)
{
    uint32_t cmd = 0;

    /* set cmd */
    cmd |= SCA_INTER_TRIGGERD;
    cmd |= SCA_FINISH_CMD;
    asr_cipher_write(dd, TE200_SSCA_QUEUE, cmd);

    sca_start_run(dd);
    return sca_wait_intr(dd);
}

static int asr_cipher_hw_init(struct asr_te200_cipher *dd)
{
    asr_cipher_write(dd, TE200_SSCA_INTR_MSK, 0x1f);
    return 0;
}

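/*
 * Run one complete cipher operation on the engine: cycle the SCA clock,
 * program the interrupt mask, then issue the INIT/PROCESS/FINISH command
 * sequence.  With use_rkek set, the OTP root key path is configured and
 * the built-in key ladder replaces the caller's key.
 */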
static int sca_cipher_handle(struct asr_te200_cipher *dd, struct sca_data *psca_data, const uint8_t *iv,
              const uint8_t *key, uint32_t key_len, const void *in, uint32_t size, void *out)
{
    int ret = 0;

    sca_clock_switch(dd, 0);
    sca_clock_switch(dd, 1);

    ret = asr_cipher_hw_init(dd);
    if (ret) {
        goto exit;
    }

    if (psca_data->use_rkek) {
        ret = rkek_cfg_init(dd);
        if (ret) {
            goto exit;
        }
        ret = sca_cipher_init(dd, psca_data->alg_type, psca_data->mode, iv, NULL, key_len);
    } else {
        ret = sca_cipher_init(dd, psca_data->alg_type, psca_data->mode, iv, key, key_len);
    }
    if (ret) {
        goto exit;
    }

    ret = sca_cipher_process(dd, psca_data->encrypt, 1, in, size, out);
    if (ret) {
        goto exit;
    }

    ret = sca_cipher_finish(dd);
    if (ret) {
        goto exit;
    }

exit:
    sca_clock_switch(dd, 0);
    return ret;
}

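/*
 * Propagate the chaining value back to req->iv, as the skcipher API expects:
 * the last ciphertext block of the request, taken from the saved copy when
 * decrypting in place.
 */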
static void asr_cipher_set_iv_as_last_ciphertext_block(struct asr_te200_cipher *dd)
{
    struct skcipher_request *req = skcipher_request_cast(dd->areq);
    struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
    struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
    unsigned int ivsize = crypto_skcipher_ivsize(cipher);

    if (req->cryptlen < ivsize)
        return;

    if (rctx->mode & FLAGS_ENCRYPT) {
        scatterwalk_map_and_copy(req->iv, req->dst,
                     req->cryptlen - ivsize, ivsize, 0);
    } else {
        if (req->src == req->dst)
            memcpy(req->iv, rctx->lastc, ivsize);
        else
            scatterwalk_map_and_copy(req->iv, req->src,
                         req->cryptlen - ivsize,
                         ivsize, 0);
    }
}

static int asr_cipher_complete(struct asr_te200_cipher *dd, int err)
{
    struct asr_te200_dev *te200_dd = dev_get_drvdata(dd->dev);
    struct asr_te200_ops *te200_ops = te200_dd->te200_ops;

    dd->flags &= ~FLAGS_BUSY;

    asr_cipher_set_iv_as_last_ciphertext_block(dd);

    if (dd->is_async)
        dd->areq->complete(dd->areq, err);

    te200_ops->dev_put(te200_dd);

    tasklet_schedule(&dd->queue_task);

    return err;
}

static int asr_complete(struct asr_te200_cipher *dd)
{
    return asr_cipher_complete(dd, 0);
}

static inline size_t asr_cipher_padlen(size_t len, size_t block_size)
{
    len &= block_size - 1;
    return len ? block_size - len : 0;
}

static void get_sca_data_info(struct sca_data *psca_data, struct asr_te200_cipher *dd)
{
    psca_data->alg_type = (dd->flags & FLAGS_SM4) ? SM4 : NORMAL_AES;
    psca_data->encrypt = (dd->flags & FLAGS_ENCRYPT) ? 1 : 0;

    if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_ECB)
        psca_data->mode = ECB;
    else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CBC)
        psca_data->mode = CBC;
    else if ((dd->flags & FLAGS_OPMODE_MASK) == FLAGS_CTR)
        psca_data->mode = CTR;
}

static int asr_cipher_buff_init(struct asr_te200_cipher *dd, uint32_t len)
{
    dd->buf = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
    if (!dd->buf) {
        dev_err(dd->dev, "unable to alloc pages.\n");
        return -ENOMEM;
    }

    dd->buflen = PAGE_SIZE << get_order(len);

    return 0;
}

static void asr_cipher_buff_cleanup(struct asr_te200_cipher *dd, uint32_t len)
{
    free_pages((unsigned long)dd->buf, get_order(len));
    dd->buflen = 0;
}

static inline void asr_cipher_get(struct asr_te200_cipher *dd)
{
    mutex_lock(&dd->cipher_lock);
}

static inline void asr_cipher_put(struct asr_te200_cipher *dd)
{
    if (mutex_is_locked(&dd->cipher_lock))
        mutex_unlock(&dd->cipher_lock);
}

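/*
 * Synchronous request path: copy the source scatterlist into one physically
 * contiguous bounce buffer (padded to the block size), run the engine in
 * place on that buffer, then copy the result back into the destination
 * scatterlist and complete the request.
 */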
static int asr_sca_cipher_process(struct asr_te200_cipher *dd,
                  struct skcipher_request *req, asr_cipher_fn_t resume)
{
    int ret;
    struct sca_data sca_data = {0};
    size_t padlen = asr_cipher_padlen(req->cryptlen, CIPHER_BLOCK_SIZE);
    struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);

    asr_cipher_get(dd);

    if (unlikely(req->cryptlen == 0)) {
        asr_cipher_put(dd);
        return -EINVAL;
    }

    dd->datalen = req->cryptlen + padlen;
    ret = asr_cipher_buff_init(dd, dd->datalen);
    if (ret) {
        asr_cipher_put(dd);
        return ret;
    }

    sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->cryptlen);

    dd->total = req->cryptlen;
    dd->real_dst = req->dst;
    dd->resume = resume;
    dd->data = (u32 *)dd->buf;

    get_sca_data_info(&sca_data, dd);
    sca_data.use_rkek = rctx->use_rkek;
    ret = sca_cipher_handle(dd, &sca_data, req->iv, (uint8_t *)dd->ctx->key,
                dd->ctx->keylen, dd->data, dd->datalen, dd->data);
    if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
                 dd->buf, dd->total))
        ret = -EINVAL;

    asr_cipher_buff_cleanup(dd, dd->datalen);
    asr_cipher_put(dd);

    return asr_cipher_complete(dd, ret);
}

static inline void asr_cipher_set_mode(struct asr_te200_cipher *dd,
                       const struct asr_cipher_reqctx *rctx)
{
    /* Clear all but persistent flags and set request flags. */
    dd->flags = (dd->flags & CIPHER_FLAGS_PERSISTENT) | rctx->mode;
}

static int asr_cipher_start(struct asr_te200_cipher *dd)
{
    struct skcipher_request *req = skcipher_request_cast(dd->areq);
    struct asr_cipher_reqctx *rctx = skcipher_request_ctx(req);
    struct asr_te200_dev *te200_dd = dev_get_drvdata(dd->dev);
    struct asr_te200_ops *te200_ops = te200_dd->te200_ops;

    te200_ops->dev_get(te200_dd);

    asr_cipher_set_mode(dd, rctx);
    return asr_sca_cipher_process(dd, req, asr_complete);
}

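/*
 * Queue handling: enqueue the new request and, if the hardware is not busy,
 * dequeue the next request (notifying any backlogged one) and start it.
 * A request taken from the queue runs asynchronously; the one just submitted
 * may report its result synchronously.
 */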
static int asr_cipher_handle_queue(struct asr_te200_cipher *dd,
                   struct crypto_async_request *new_areq)
{
    struct crypto_async_request *areq, *backlog;
    struct asr_cipher_ctx *ctx;
    unsigned long flags;
    bool start_async;
    int err, ret = 0;

    spin_lock_irqsave(&dd->lock, flags);
    if (new_areq)
        ret = crypto_enqueue_request(&dd->queue, new_areq);
    if (dd->flags & FLAGS_BUSY) {
        spin_unlock_irqrestore(&dd->lock, flags);
        return ret;
    }

    backlog = crypto_get_backlog(&dd->queue);
    areq = crypto_dequeue_request(&dd->queue);
    if (areq) {
        dd->flags |= FLAGS_BUSY;
    }
    spin_unlock_irqrestore(&dd->lock, flags);

    if (!areq)
        return ret;

    if (backlog)
        backlog->complete(backlog, -EINPROGRESS);

    ctx = crypto_tfm_ctx(areq->tfm);
    dd->areq = areq;
    dd->ctx = ctx;
    start_async = (areq != new_areq);
    dd->is_async = start_async;

    /* WARNING: ctx->start() MAY change dd->is_async. */
    err = ctx->start(dd);
    return (start_async) ? ret : err;
}

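/*
 * Common skcipher entry point.  For in-place decryption the last ciphertext
 * block is saved in the request context before processing, so the chaining
 * IV can still be recovered after the destination overwrites the source.
 */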
static int asr_cipher(struct skcipher_request *req, unsigned long mode)
{
    int ret;
    struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
    struct asr_cipher_reqctx *rctx;

    ctx->block_size = CIPHER_BLOCK_SIZE;
    rctx = skcipher_request_ctx(req);
    rctx->mode = mode;
    rctx->use_rkek = ctx->use_rkek;

    if (!(mode & FLAGS_ENCRYPT) && (req->src == req->dst)) {
        unsigned int ivsize = crypto_skcipher_ivsize(cipher);

        if (req->cryptlen >= ivsize) {
            scatterwalk_map_and_copy(rctx->lastc, req->src,
                         req->cryptlen - ivsize,
                         ivsize, 0);
        }
    }

    ret = asr_cipher_handle_queue(ctx->dd, &req->base);

    asr_cipher_put(ctx->dd);
    return ret;
}

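/*
 * setkey handlers: the caller-supplied key is stored in the transform context
 * and later loaded into the engine as an external key.  The hwkey variant
 * additionally requires the RKEK fuse to be burned and flags the context to
 * use the device root key ladder instead of the supplied key.
 */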
static int asr_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
                 unsigned int keylen)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
    struct asr_te200_cipher *dd = asr_cipher_local;

    ctx->dd = dd;
    ctx->use_rkek = false;

    if (keylen != AES_KEYSIZE_128 &&
        keylen != AES_KEYSIZE_192 &&
        keylen != AES_KEYSIZE_256) {
        crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    memcpy(ctx->key, key, keylen);
    ctx->keylen = keylen;

    return 0;
}

static int asr_cipher_set_hwkey(struct crypto_skcipher *cipher, const u8 *key,
                unsigned int keylen)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
    struct asr_te200_cipher *dd = asr_cipher_local;

    ctx->dd = dd;
    if (!dd->rkek_burned)
        return -EPERM;

    ctx->use_rkek = true;

    if (keylen != AES_KEYSIZE_128 &&
        keylen != AES_KEYSIZE_192 &&
        keylen != AES_KEYSIZE_256) {
        crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
    }

    memcpy(ctx->key, key, keylen);
    ctx->keylen = keylen;

    return 0;
}

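/*
 * Check the GEU fuse block to see whether the root key (RKEK) has been
 * provisioned.  Returns 1 when the fuse indicates secure key access is
 * disabled (RKEK burned), 0 otherwise; the hardware-key algorithms are
 * only usable in the former case.
 */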
static int asr_cipher_rkek_fused(struct asr_te200_cipher *dd)
{
#define GEU_FUSE_VAL_APCFG2 (0x0408)
#define GEU_SECURE_KEY_ACCESS_DISABLED (1 << 29)

    uint32_t value;
    struct device_node *np;
    struct resource res;
    void __iomem *io_base;

    /* get geu node */
    np = of_find_compatible_node(NULL, NULL, "asr,asr-geu");
    if (!np) {
        dev_err(dd->dev, "can't find geu node to check rkek burned");
        return 0;
    }

    if (of_address_to_resource(np, 0, &res)) {
        dev_err(dd->dev, "can't find geu address");
        return 0;
    }

    io_base = ioremap(res.start, resource_size(&res));
    if (!io_base) {
        dev_err(dd->dev, "geu regs can't remap");
        return 0;
    }

    value = readl_relaxed(io_base + GEU_FUSE_VAL_APCFG2);
    if (value & GEU_SECURE_KEY_ACCESS_DISABLED) {
        iounmap(io_base);
        return 1;
    }

    iounmap(io_base);
    return 0;
}

static int asr_aes_ecb_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_ECB | FLAGS_ENCRYPT);
}

static int asr_aes_ecb_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_ECB);
}

static int asr_aes_cbc_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CBC | FLAGS_ENCRYPT);
}

static int asr_aes_cbc_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CBC);
}

static int asr_aes_ctr_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CTR | FLAGS_ENCRYPT);
}

static int asr_aes_ctr_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_AES | FLAGS_CTR);
}

static int asr_sm4_ecb_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_SM4 | FLAGS_ECB | FLAGS_ENCRYPT);
}

static int asr_sm4_ecb_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_SM4 | FLAGS_ECB);
}

static int asr_sm4_cbc_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_SM4 | FLAGS_CBC | FLAGS_ENCRYPT);
}

static int asr_sm4_cbc_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_SM4 | FLAGS_CBC);
}

static int asr_sm4_ctr_encrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_SM4 | FLAGS_CTR | FLAGS_ENCRYPT);
}

static int asr_sm4_ctr_decrypt(struct skcipher_request *req)
{
    return asr_cipher(req, FLAGS_SM4 | FLAGS_CTR);
}

static int asr_cipher_init(struct crypto_skcipher *tfm)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

    tfm->reqsize = sizeof(struct asr_cipher_reqctx);
    ctx->start = asr_cipher_start;

    return 0;
}

static int asr_cipher_hwkey_init(struct crypto_skcipher *tfm)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
    struct asr_te200_cipher *dd = asr_cipher_local;

    if (!dd->rkek_burned)
        return -EPERM;

    tfm->reqsize = sizeof(struct asr_cipher_reqctx);
    ctx->start = asr_cipher_start;

    return 0;
}

static void asr_cipher_exit(struct crypto_skcipher *tfm)
{
    struct asr_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

    memset(ctx, 0, sizeof(*ctx));
}

static struct skcipher_alg cipher_algs[] = {
    /* AES - ECB */
    {
        .base = {
            .cra_name = "ecb(aes)",
            .cra_driver_name = "asr-ecb-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_aes_ecb_encrypt,
        .decrypt = asr_aes_ecb_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
    },
    /* AES - CBC */
    {
        .base = {
            .cra_name = "cbc(aes)",
            .cra_driver_name = "asr-cbc-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_aes_cbc_encrypt,
        .decrypt = asr_aes_cbc_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
        .ivsize = AES_BLOCK_SIZE,
    },
    /* AES - CTR */
    {
        .base = {
            .cra_name = "ctr(aes)",
            .cra_driver_name = "asr-ctr-aes",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_aes_ctr_encrypt,
        .decrypt = asr_aes_ctr_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
        .ivsize = AES_BLOCK_SIZE,
    },

    /* SM4 - ECB */
    {
        .base = {
            .cra_name = "ecb(sm4)",
            .cra_driver_name = "asr-ecb-sm4",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = SM4_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_sm4_ecb_encrypt,
        .decrypt = asr_sm4_ecb_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
    },
    /* SM4 - CBC */
    {
        .base = {
            .cra_name = "cbc(sm4)",
            .cra_driver_name = "asr-cbc-sm4",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = SM4_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_sm4_cbc_encrypt,
        .decrypt = asr_sm4_cbc_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
        .ivsize = SM4_BLOCK_SIZE,
    },
    /* SM4 - CTR */
    {
        .base = {
            .cra_name = "ctr(sm4)",
            .cra_driver_name = "asr-ctr-sm4",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = SM4_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_setkey,
        .encrypt = asr_sm4_ctr_encrypt,
        .decrypt = asr_sm4_ctr_decrypt,
        .init = asr_cipher_init,
        .exit = asr_cipher_exit,
        .ivsize = SM4_BLOCK_SIZE,
    },

    /* hardware key AES - ECB */
    {
        .base = {
            .cra_name = "ecb(aes-hwkey)",
            .cra_driver_name = "asr-ecb-aes-hwkey",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_set_hwkey,
        .encrypt = asr_aes_ecb_encrypt,
        .decrypt = asr_aes_ecb_decrypt,
        .init = asr_cipher_hwkey_init,
        .exit = asr_cipher_exit,
    },
    /* hardware key AES - CBC */
    {
        .base = {
            .cra_name = "cbc(aes-hwkey)",
            .cra_driver_name = "asr-cbc-aes-hwkey",
            .cra_priority = 300,
            .cra_flags = CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct asr_cipher_ctx),
            .cra_alignmask = 0xf,
            .cra_module = THIS_MODULE,
        },
        .min_keysize = CIPHER_MIN_KEY_SIZE,
        .max_keysize = CIPHER_MAX_KEY_SIZE,
        .setkey = asr_cipher_set_hwkey,
        .encrypt = asr_aes_cbc_encrypt,
        .decrypt = asr_aes_cbc_decrypt,
        .init = asr_cipher_hwkey_init,
        .exit = asr_cipher_exit,
        .ivsize = AES_BLOCK_SIZE,
    },
};

static void asr_cipher_queue_task(unsigned long data)
{
    struct asr_te200_cipher *dd = (struct asr_te200_cipher *)data;

    asr_cipher_handle_queue(dd, NULL);
}

static void asr_cipher_done_task(unsigned long data)
{
    struct asr_te200_cipher *dd = (struct asr_te200_cipher *)data;

    dd->is_async = true;
    (void)dd->resume(dd);
}

int asr_te200_cipher_register(struct asr_te200_dev *te200_dd)
{
    int err, i, j;
    struct device_node *np = NULL;
    struct asr_te200_cipher *cipher_dd;

    cipher_dd = &te200_dd->asr_cipher;
    cipher_dd->dev = te200_dd->dev;
    cipher_dd->io_base = te200_dd->io_base;
    cipher_dd->phys_base = te200_dd->phys_base;

    np = cipher_dd->dev->of_node;

    cipher_dd->rkek_burned = asr_cipher_rkek_fused(cipher_dd);

    asr_cipher_local = cipher_dd;

    spin_lock_init(&cipher_dd->lock);
    mutex_init(&cipher_dd->cipher_lock);
    tasklet_init(&cipher_dd->done_task, asr_cipher_done_task,
             (unsigned long)cipher_dd);
    tasklet_init(&cipher_dd->queue_task, asr_cipher_queue_task,
             (unsigned long)cipher_dd);
    crypto_init_queue(&cipher_dd->queue, ASR_CIPHER_QUEUE_LENGTH);

    for (i = 0; i < ARRAY_SIZE(cipher_algs); i++) {
        err = crypto_register_skcipher(&cipher_algs[i]);
        if (err) {
            for (j = 0; j < i; j++)
                crypto_unregister_skcipher(&cipher_algs[j]);
            return err;
        }
    }

    return 0;
}
EXPORT_SYMBOL_GPL(asr_te200_cipher_register);

int asr_te200_cipher_unregister(struct asr_te200_dev *te200_dd)
{
    int i;
    struct asr_te200_cipher *cipher_dd = &te200_dd->asr_cipher;

    for (i = 0; i < ARRAY_SIZE(cipher_algs); i++)
        crypto_unregister_skcipher(&cipher_algs[i]);

    tasklet_kill(&cipher_dd->done_task);
    tasklet_kill(&cipher_dd->queue_task);

    return 0;
}
EXPORT_SYMBOL_GPL(asr_te200_cipher_unregister);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
MODULE_DESCRIPTION("ASR te200 cipher driver");