blob: f315b91c60036cfbffc4193c60cba0064bed0563 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001#include <linux/module.h>
2#include <linux/slab.h>
3#include <linux/err.h>
4#include <linux/clk-provider.h>
5#include <linux/clk.h>
6#include <linux/io.h>
7#include <linux/hw_random.h>
8#include <linux/platform_device.h>
9#include <linux/scatterlist.h>
10#include <crypto/scatterwalk.h>
11#include <linux/of_device.h>
12#include <linux/mutex.h>
13#include <linux/device.h>
14#include <linux/init.h>
15#include <linux/delay.h>
16#include <crypto/hmac.h>
17#include <crypto/md5.h>
18#include <crypto/sha.h>
19
20#include "asr-sha-optee.h"
21
/* Singleton device state; set once in asr_te200_sha_register(). */
static struct asr_te200_sha *asr_sha_local = NULL;

/* UUID of the OP-TEE pseudo-TA implementing the SHA commands. */
static struct teec_uuid pta_sha_uuid = ASR_SHA_ACCESS_UUID;
/* Serializes request processing in asr_sha_start(). */
static struct mutex queue_lock = __MUTEX_INITIALIZER(queue_lock);
26
27static int asrte200_optee_acquire_hash_init(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg)
28{
29 struct tee_ioctl_invoke_arg invoke_arg;
30 struct tee_param params[2];
31 int ret = 0;
32
33 ret = asrte200_optee_open_ta(&ctx->asrte200_tee_ctx, uuid);
34 if (ret != 0) {
35 return ret;
36 }
37
38 memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
39 invoke_arg.func = cmd;
40 invoke_arg.session = ctx->asrte200_tee_ctx.session;
41 invoke_arg.num_params = 2;
42
43
44 params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
45 params[0].u.value.a = alg;
46
47 params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
48 params[1].u.value.a = (uint32_t)ctx;
49
50 ret = tee_client_invoke_func(ctx->asrte200_tee_ctx.tee_ctx, &invoke_arg, params);
51 if (ret != 0) {
52 goto exit;
53 } else if (invoke_arg.ret != 0) {
54 ret = -EIO;
55 goto exit;
56 }
57
58 return ret;
59
60exit:
61 asrte200_optee_close_ta(&ctx->asrte200_tee_ctx);
62 return ret;
63}
64
65static int asrte200_optee_acquire_hash_update(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, \
66 u32 alg, uint8_t *in, u32 inlen)
67{
68 struct tee_ioctl_invoke_arg invoke_arg;
69 struct tee_param params[2];
70 int ret = 0;
71 struct tee_shm *shm = NULL;
72 u8 *pbuf = NULL;
73
74 memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
75 invoke_arg.func = cmd;
76 invoke_arg.session = ctx->asrte200_tee_ctx.session;
77 invoke_arg.num_params = 2;
78
79 shm = tee_shm_alloc(ctx->asrte200_tee_ctx.tee_ctx, inlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
80 if (!shm) {
81 ret = -EINVAL;
82 goto exit;
83 }
84
85 pbuf = tee_shm_get_va(shm, 0);
86 memcpy(pbuf, in, inlen);
87
88 params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
89 params[0].u.memref.shm_offs = 0;
90 params[0].u.memref.size = inlen;
91 params[0].u.memref.shm = shm;
92
93 params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
94 params[1].u.value.a = (uint32_t)ctx;
95
96 ret = tee_client_invoke_func(ctx->asrte200_tee_ctx.tee_ctx, &invoke_arg, params);
97 if (ret != 0) {
98 goto exit;
99 } else if (invoke_arg.ret != 0) {
100 ret = -EIO;
101 goto exit;
102 }
103
104 tee_shm_free(shm);
105 return ret;
106
107exit:
108 tee_shm_free(shm);
109 asrte200_optee_close_ta(&ctx->asrte200_tee_ctx);
110 return ret;
111}
112
113static int asrte200_optee_acquire_hash_final(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg, u8 *out, u8 outlen)
114{
115 struct tee_ioctl_invoke_arg invoke_arg;
116 struct tee_param params[2];
117 int ret = 0;
118 struct tee_shm *shm = NULL;
119 u8 *pbuf = NULL;
120
121 memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
122 invoke_arg.func = cmd;
123 invoke_arg.session = ctx->asrte200_tee_ctx.session;
124 invoke_arg.num_params = 2;
125
126 shm = tee_shm_alloc(ctx->asrte200_tee_ctx.tee_ctx, outlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
127 if (!shm) {
128 ret = -EINVAL;
129 goto exit;
130 }
131
132 params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
133 params[0].u.memref.shm_offs = 0;
134 params[0].u.memref.size = outlen;
135 params[0].u.memref.shm = shm;
136
137 params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
138 params[1].u.value.a = (uint32_t)ctx;
139
140 ret = tee_client_invoke_func(ctx->asrte200_tee_ctx.tee_ctx, &invoke_arg, params);
141 if (ret != 0) {
142 goto exit;
143 } else if (invoke_arg.ret != 0) {
144 ret = -EIO;
145 goto exit;
146 }
147
148 pbuf = tee_shm_get_va(shm, 0);
149 memcpy(out, pbuf, outlen);
150
151exit:
152 tee_shm_free(shm);
153 asrte200_optee_close_ta(&ctx->asrte200_tee_ctx);
154 return ret;
155}
156
/*
 * Core request-queue pump.  Enqueues @req (which may be NULL when called
 * from the tasklet just to kick the queue) and, if the engine is idle,
 * dequeues the next request and starts it through the tfm's ctx->start()
 * hook.
 *
 * Returns the enqueue status for the caller's own request when the work
 * is started asynchronously, otherwise the synchronous start() result.
 */
static int asr_sha_handle_queue(struct asr_te200_sha *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct asr_sha_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err = 0, ret = 0;

	/* Enqueue and engine-busy test must be atomic w.r.t. other CPUs. */
	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		/* Another request is in flight; it will re-kick the queue. */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req) {
		return ret;
	}

	/* Notify a backlogged request that it entered the queue proper. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(async_req->tfm);

	dd->req = ahash_request_cast(async_req);
	/* If we dequeued somebody else's request, complete it asynchronously. */
	start_async = (dd->req != req);
	dd->is_async = start_async;
	dd->force_complete = false;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}
200
201static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
202{
203 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
204 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
205 struct asr_te200_sha *dd = ctx->dd;
206
207 ctx->op = op;
208
209 return asr_sha_handle_queue(dd, req);
210}
211
212static void asr_sha_copy_ready_hash(struct ahash_request *req)
213{
214 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
215 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
216
217 if (!req->result)
218 return;
219
220 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
221 case SHA_FLAGS_SHA1:
222 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
223 break;
224 case SHA_FLAGS_SHA224:
225 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
226 break;
227 case SHA_FLAGS_SHA256:
228 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
229 break;
230 default:
231 return;
232 }
233}
234
235static inline int asr_sha_complete(struct asr_te200_sha *dd, int err)
236{
237 struct ahash_request *req = dd->req;
238 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
239 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
240
241 dd->flags &= ~(SHA_FLAGS_BUSY);
242 ctx->flags &= ~(SHA_FLAGS_FINAL);
243
244 if ((dd->is_async || dd->force_complete) && req->base.complete)
245 req->base.complete(&req->base, err);
246
247 /* handle new request */
248 tasklet_schedule(&dd->queue_task);
249
250 return err;
251}
252
/*
 * Drain the request scatterlist into the linear bounce buffer.
 *
 * Copies up to (ctx->buflen - ctx->bufcnt) bytes or ctx->total, whichever
 * is smaller, advancing ctx->sg / ctx->offset and decrementing ctx->total
 * as it goes.  Always returns 0.
 */
static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latest case,
			 * check if there is another sg in the list, a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		/* Current sg entry exhausted: advance, or stop at list end. */
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
294
295static int asr_sha_buff_init(struct asr_te200_sha *dd, uint32_t len)
296{
297 struct ahash_request *req = dd->req;
298 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
299 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
300
301 ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
302 if (!ctx->buffer) {
303 dev_err(dd->dev, "unable to alloc pages.\n");
304 return -ENOMEM;
305 }
306
307 ctx->buflen = PAGE_SIZE << get_order(len);
308
309 return 0;
310}
311
312static void asr_sha_buff_cleanup(struct asr_te200_sha *dd, uint32_t len)
313{
314 struct ahash_request *req = dd->req;
315 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
316 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
317
318 free_pages((unsigned long)ctx->buffer, get_order(len));
319 ctx->buflen = 0;
320}
321
322static int sha_init_req(struct asr_optee_sha_reqctx *optee_ctx)
323{
324 int ret = 0;
325 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
326
327 /* hardware: hash init */
328 ret = asrte200_optee_acquire_hash_init(optee_ctx, &pta_sha_uuid, \
329 CMD_SHA_INIT, ctx->alg);
330 if (ret)
331 return -EINVAL;
332 return 0;
333}
334
335static int sha_update_req(struct asr_optee_sha_reqctx *optee_ctx)
336{
337 int ret = 0;
338 size_t bufcnt;
339 uint8_t *pdata;
340 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
341 uint32_t buflen = ctx->total;
342
343 ret = asr_sha_buff_init(ctx->dd, ctx->total);
344 if (ret)
345 return -ENOMEM;
346
347 asr_sha_append_sg(ctx);
348 bufcnt = ctx->bufcnt;
349 ctx->bufcnt = 0;
350
351 pdata = (uint8_t *)ctx->buffer;
352
353 /* hashware: hash process */
354 ret = asrte200_optee_acquire_hash_update(optee_ctx, &pta_sha_uuid, \
355 CMD_SHA_UPDATE, ctx->alg, pdata, bufcnt);
356 if (ret)
357 ret = -EINVAL;
358
359 asr_sha_buff_cleanup(ctx->dd, buflen);
360 return ret;
361}
362
363static void sha_finish_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
364{
365 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
366 struct crypto_ahash *tfm = crypto_ahash_reqtfm(ctx->dd->req);
367 uint8_t *hash = (uint8_t *)ctx->digest;
368 uint32_t outlen = crypto_ahash_digestsize(tfm);
369
370 if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
371 *err = asrte200_optee_acquire_hash_final(optee_ctx, &pta_sha_uuid, CMD_SHA_FINAL, \
372 ctx->alg, (uint8_t *)hash, outlen);
373 ctx->flags &= (~SHA_FLAGS_FINAL);
374 asr_sha_copy_ready_hash(ctx->dd->req);
375 } else {
376 ctx->flags |= SHA_FLAGS_ERROR;
377 }
378}
379
380static void sha_next_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
381{
382 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
383
384 if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
385 sha_finish_req(optee_ctx, err);
386
387 (void)asr_sha_complete(ctx->dd, *err);
388}
389
390static int asr_sha_done(struct asr_te200_sha *dd);
391
392static int asr_sha_start(struct asr_te200_sha *dd)
393{
394 int err = 0;
395 struct ahash_request *req = dd->req;
396 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
397 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
398
399 mutex_lock(&queue_lock);
400
401 dd->resume = asr_sha_done;
402
403 if ((ctx->flags & SHA_FLAGS_INIT)) {
404 err = sha_init_req(optee_ctx);
405 ctx->flags &= (~SHA_FLAGS_INIT);
406 }
407
408 if (!err) {
409 if (ctx->op == SHA_OP_UPDATE) {
410 err = sha_update_req(optee_ctx);
411 if (!err && (ctx->flags & SHA_FLAGS_FINUP))
412 /* no final() after finup() */
413 sha_finish_req(optee_ctx, &err);
414 } else if (ctx->op == SHA_OP_FINAL) {
415 sha_finish_req(optee_ctx, &err);
416 }
417 }
418
419 if (unlikely(err != -EINPROGRESS))
420 /* Task will not finish it, so do it here */
421 sha_next_req(optee_ctx, &err);
422
423 mutex_unlock(&queue_lock);
424 return err;
425}
426
427static int asr_sha_cra_init(struct crypto_tfm *tfm)
428{
429 struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
430 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
431 sizeof(struct asr_optee_sha_reqctx));
432 ctx->start = asr_sha_start;
433
434 return 0;
435}
436
437static void asr_sha_cra_exit(struct crypto_tfm *tfm)
438{
439 struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
440 memset(ctx, 0, sizeof(*ctx));
441}
442
/* Take the per-device SHA lock around entry-point bookkeeping. */
static inline void asr_sha_get(struct asr_te200_sha *dd)
{
	mutex_lock(&dd->sha_lock);
}
447
/*
 * Drop the per-device SHA lock.  The mutex_is_locked() guard tolerates
 * callers (e.g. asr_sha_finup()'s EINPROGRESS path) that call put without
 * currently holding the lock.
 * NOTE(review): mutex_is_locked() does not check ownership, so this can
 * in principle unlock a mutex held by another thread — confirm callers.
 */
static inline void asr_sha_put(struct asr_te200_sha *dd)
{
	if(mutex_is_locked(&dd->sha_lock))
		mutex_unlock(&dd->sha_lock);
}
453
454static int asr_sha_init(struct ahash_request *req)
455{
456 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
457 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
458 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
459 struct asr_te200_sha *dd = asr_sha_local;
460
461 asr_sha_get(dd);
462
463 ctx->dd = dd;
464 ctx->flags = 0;
465
466 switch (crypto_ahash_digestsize(tfm)) {
467 case SHA1_DIGEST_SIZE:
468 ctx->flags |= SHA_FLAGS_SHA1;
469 ctx->alg = TEE_ALG_SHA1;
470 break;
471 case SHA224_DIGEST_SIZE:
472 ctx->flags |= SHA_FLAGS_SHA224;
473 ctx->alg = TEE_ALG_SHA224;
474 break;
475 case SHA256_DIGEST_SIZE:
476 ctx->flags |= SHA_FLAGS_SHA256;
477 ctx->alg = TEE_ALG_SHA256;
478 break;
479 default:
480 asr_sha_put(dd);
481 return -EINVAL;
482 }
483
484 ctx->bufcnt = 0;
485 ctx->flags |= SHA_FLAGS_INIT;
486
487 asr_sha_put(dd);
488 return 0;
489}
490
491static int asr_sha_update(struct ahash_request *req)
492{
493 int ret = 0;
494 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
495 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
496
497 asr_sha_get(ctx->dd);
498
499 ctx->total = req->nbytes;
500 ctx->sg = req->src;
501 ctx->offset = 0;
502
503 ret = asr_sha_enqueue(req, SHA_OP_UPDATE);
504
505 asr_sha_put(ctx->dd);
506 return ret;
507}
508
509static int asr_sha_final(struct ahash_request *req)
510{
511 int ret = 0;
512 struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
513 struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
514
515 asr_sha_get(ctx->dd);
516
517 ctx->flags |= SHA_FLAGS_FINAL;
518 if (ctx->flags & SHA_FLAGS_ERROR) {
519 asr_sha_put(ctx->dd);
520 return 0; /* uncompleted hash is not needed */
521 }
522 ret = asr_sha_enqueue(req, SHA_OP_FINAL);
523
524 asr_sha_put(ctx->dd);
525 return ret;
526}
527
/*
 * ahash .finup = update() + final().  If update() goes asynchronous we
 * return immediately; SHA_FLAGS_FINUP makes the completion path run the
 * final step itself.
 */
static int asr_sha_finup(struct ahash_request *req)
{
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = asr_sha_update(req);
	if (err1 == -EINPROGRESS ||
	    (err1 == -EBUSY && (ahash_request_flags(req) &
				CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		/* NOTE(review): asr_sha_update() already released sha_lock;
		 * this extra put relies on the mutex_is_locked() guard in
		 * asr_sha_put() — confirm it is intentional. */
		asr_sha_put(ctx->dd);
		return err1;
	}
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = asr_sha_final(req);

	return err1 ?: err2;
}
551
/* ahash .digest: one-shot init + finup. */
static int asr_sha_digest(struct ahash_request *req)
{
	int ret = asr_sha_init(req);

	return ret ? ret : asr_sha_finup(req);
}
556
557static int asr_sha_export(struct ahash_request *req, void *out)
558{
559 const struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
560
561 memcpy(out, ctx, sizeof(*ctx));
562 return 0;
563}
564
565static int asr_sha_import(struct ahash_request *req, const void *in)
566{
567 struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
568
569 memcpy(ctx, in, sizeof(*ctx));
570 return 0;
571}
572
/*
 * ahash algorithm descriptors registered with the crypto API.  All three
 * entries share the same callbacks; only digest size, block size and names
 * differ.  Array order is sha1, sha256, sha224.
 */
static struct ahash_alg sha_algs[] = {
	/* sha1 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "asr-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha256 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "asr-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha224 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "asr-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},
};
655
656static void asr_sha_queue_task(unsigned long data)
657{
658 struct asr_te200_sha *dd = (struct asr_te200_sha *)data;
659
660 asr_sha_handle_queue(dd, NULL);
661}
662
663static int asr_sha_done(struct asr_te200_sha *dd)
664{
665 int err = 0;
666 struct ahash_request *req = dd->req;
667 struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);
668
669 sha_finish_req(ctx, 0);
670
671 return err;
672}
673
674static void asr_sha_done_task(unsigned long data)
675{
676 struct asr_te200_sha *dd = (struct asr_te200_sha *)data;
677
678 dd->is_async = true;
679 (void)dd->resume(dd);
680}
681
682static int hash_handle(int alg, uint8_t *in, uint32_t inlen, uint8_t *out)
683{
684 int ret = 0;
685 uint32_t outlen;
686 struct asr_optee_sha_reqctx ctx;
687
688 switch(alg) {
689 case TEE_ALG_SHA256:
690 outlen = 32;
691 break;
692 case TEE_ALG_SHA224:
693 outlen = 28;
694 break;
695 case TEE_ALG_SHA1:
696 outlen = 20;
697 break;
698 default:
699 ret = -1;
700 goto exit;
701 }
702
703 ret = asrte200_optee_acquire_hash_init(&ctx, &pta_sha_uuid, CMD_SHA_INIT, alg);
704 if (ret) {
705 ret = -1;
706 goto exit;
707 }
708
709 ret = asrte200_optee_acquire_hash_update(&ctx, &pta_sha_uuid, CMD_SHA_UPDATE, alg, in, inlen);
710 if (ret) {
711 ret = -1;
712 goto exit;
713 }
714
715 ret = asrte200_optee_acquire_hash_final(&ctx, &pta_sha_uuid, CMD_SHA_FINAL, alg, out, outlen);
716 if (ret) {
717 ret = -1;
718 goto exit;
719 }
720
721exit:
722 return ret;
723}
724
725static int tee_hwhash_func_verify(void)
726{
727 int ret = 0;
728 unsigned char out_sha256[32] = {0};
729 const struct {
730 const char *msg;
731 uint8_t hash[32];
732 } sha256_tests = {
733 "abc",
734 { 0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01,
735 0xCF, 0xEA, 0x41, 0x41, 0x40, 0xDE,
736 0x5D, 0xAE, 0x22, 0x23, 0xB0, 0x03,
737 0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C,
738 0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00,
739 0x15, 0xAD
740 }
741 };
742
743 ret = hash_handle(TEE_ALG_SHA256, (uint8_t *)sha256_tests.msg, strlen(sha256_tests.msg), out_sha256);
744 if (ret)
745 return ret;
746
747 if (memcmp(out_sha256, sha256_tests.hash, sizeof(out_sha256))) {
748 return -1;
749 }
750
751 return 0;
752}
753
754// #define ASR_TE200_SHA_TEST
755
756#ifdef ASR_TE200_SHA_TEST
757static int te200_sha_test(void);
758#endif
759
760int asr_te200_sha_register(struct asr_te200_dev *te200_dd)
761{
762 int err, i, j;
763 struct asr_te200_sha *sha_dd;
764
765 sha_dd = &te200_dd->asr_sha;
766 sha_dd->dev = te200_dd->dev;
767
768 asr_sha_local = sha_dd;
769
770 spin_lock_init(&sha_dd->lock);
771 mutex_init(&sha_dd->sha_lock);
772 tasklet_init(&sha_dd->done_task, asr_sha_done_task,
773 (unsigned long)sha_dd);
774 tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
775 (unsigned long)sha_dd);
776 crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);
777
778 /* don't register sha if hash verify err in tos */
779 err = tee_hwhash_func_verify();
780 if (err)
781 return err;
782
783 for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
784 err = crypto_register_ahash(&sha_algs[i]);
785 if (err)
786 goto err_sha_algs;
787 }
788
789#ifdef ASR_TE200_SHA_TEST
790 te200_sha_test();
791#endif
792
793 return 0;
794
795err_sha_algs:
796 for (j = 0; j < i; j++)
797 crypto_unregister_ahash(&sha_algs[j]);
798
799 return err;
800}
801EXPORT_SYMBOL_GPL(asr_te200_sha_register);
802
803int asr_te200_sha_unregister(struct asr_te200_dev *te200_dd)
804{
805 int i;
806 struct asr_te200_sha *sha_dd = &te200_dd->asr_sha;
807
808 for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
809 crypto_unregister_ahash(&sha_algs[i]);
810
811 tasklet_kill(&sha_dd->queue_task);
812 tasklet_kill(&sha_dd->done_task);
813
814 return 0;
815}
816EXPORT_SYMBOL_GPL(asr_te200_sha_unregister);
817
818
819
820#ifdef ASR_TE200_SHA_TEST
821static int te200_sha_test(void)
822{
823 int ret = 0;
824
825 const struct {
826 const char *msg;
827 uint8_t hash[20];
828 } sha1_tests[] = {
829 {
830 "abc",
831 { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06,
832 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
833 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
834 0xd8, 0x9d
835 }
836 },
837 {
838 "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs" \
839 "fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl" \
840 "nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn" \
841 "kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjo",
842 {
843 0x93, 0x84, 0x7f, 0x98, 0x22, 0x5e,
844 0x6d, 0xf2, 0x09, 0x1c, 0xc9, 0xac,
845 0xbb, 0x5d, 0x00, 0x2d, 0x64, 0x81,
846 0xe3, 0xcd
847 }
848 },
849 {
850 "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs" \
851 "fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl" \
852 "nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn" \
853 "kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjoewiroiowiod",
854 {
855 0x6a, 0x66, 0xc2, 0x87, 0x84, 0x36,
856 0x14, 0x90, 0x99, 0x03, 0x90, 0xf0,
857 0xaa, 0x7e, 0xbd, 0xc7, 0xdb, 0x38,
858 0x54, 0x09
859 }
860 },
861 {
862 "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
863 "jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
864 "nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
865 "gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
866 "jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
867 "oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
868 "djkisijdknknkskdnknflnnesniewinoinknmdn"
869 "kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
870 "lskldklklklnmlflmlmlfmlfml",
871 {
872 0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
873 0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
874 0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
875 0x13, 0x91
876 }
877 }
878 };
879
880 struct asr_optee_sha_reqctx ctx1;
881 struct asr_optee_sha_reqctx ctx2;
882 struct asr_optee_sha_reqctx ctx3;
883 struct asr_optee_sha_reqctx ctx4;
884 unsigned char out_sha1_1[20] = {0};
885 unsigned char out_sha1_2[20] = {0};
886 unsigned char out_sha1_3[20] = {0};
887 unsigned char out_sha1_4[20] = {0};
888
889 ret = asrte200_optee_acquire_hash_init(&ctx1, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
890 if (ret) {
891 return ret;
892 }
893
894 ret = asrte200_optee_acquire_hash_init(&ctx2, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
895 if (ret) {
896 return ret;
897 }
898
899 ret = asrte200_optee_acquire_hash_update(&ctx1, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
900 (uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
901 if (ret) {
902 return ret;
903 }
904
905 ret = asrte200_optee_acquire_hash_init(&ctx3, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
906 if (ret) {
907 return ret;
908 }
909
910 ret = asrte200_optee_acquire_hash_update(&ctx2, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
911 (uint8_t *)sha1_tests[1].msg, 10);
912 if (ret) {
913 return ret;
914 }
915
916 ret = asrte200_optee_acquire_hash_update(&ctx2, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
917 (uint8_t *)(((uint32_t)sha1_tests[1].msg)+10), strlen(sha1_tests[1].msg) - 10);
918 if (ret) {
919 return ret;
920 }
921
922 ret = asrte200_optee_acquire_hash_final(&ctx1, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
923 out_sha1_1, sizeof(out_sha1_1));
924 if (ret) {
925 return ret;
926 }
927
928 ret = asrte200_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
929 (uint8_t *)sha1_tests[2].msg, 25);
930 if (ret) {
931 return ret;
932 }
933
934 ret = asrte200_optee_acquire_hash_init(&ctx4, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
935 if (ret) {
936 return ret;
937 }
938
939 ret = asrte200_optee_acquire_hash_final(&ctx2, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
940 out_sha1_2, sizeof(out_sha1_2));
941 if (ret) {
942 return ret;
943 }
944
945 ret = asrte200_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
946 (uint8_t *)(((uint32_t)sha1_tests[2].msg)+25), strlen(sha1_tests[2].msg)-25);
947 if (ret) {
948 return ret;
949 }
950
951 ret = asrte200_optee_acquire_hash_final(&ctx3, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
952 out_sha1_3, sizeof(out_sha1_3));
953 if (ret) {
954 return ret;
955 }
956
957 ret = asrte200_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
958 (uint8_t *)sha1_tests[3].msg, 43);
959 if (ret) {
960 return ret;
961 }
962 ret = asrte200_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
963 (uint8_t *)(((uint32_t)sha1_tests[3].msg)+43), strlen(sha1_tests[3].msg)-43);
964 if (ret) {
965 return ret;
966 }
967
968 ret = asrte200_optee_acquire_hash_final(&ctx4, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
969 out_sha1_4, sizeof(out_sha1_4));
970 if (ret) {
971 return ret;
972 }
973
974 if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1))) {
975 printk("sha1 test 0 failed");
976 } else {
977 printk("sha1 test 0 pass");
978 }
979 if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2))) {
980 printk("sha1 test 1 failed");
981 } else {
982 printk("sha1 test 1 pass");
983 }
984 if (memcmp(out_sha1_3, sha1_tests[2].hash, sizeof(out_sha1_3))) {
985 printk("sha1 test 2 failed");
986 } else {
987 printk("sha1 test 2 pass");
988 }
989 if (memcmp(out_sha1_4, sha1_tests[3].hash, sizeof(out_sha1_4))) {
990 printk("sha1 test 3 failed");
991 } else {
992 printk("sha1 test 4 pass");
993 }
994
995
996 return 0;
997}
998#endif
999
1000MODULE_LICENSE("GPL");
1001MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
1002MODULE_DESCRIPTION("ASR te200 sha driver");