#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <linux/of_device.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "asr-sha-optee.h"

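/*
 * ASR BCM SHA driver, OP-TEE backed: the MD5/SHA-1/SHA-2 ahash transforms
 * registered below do no hashing in the kernel itself. Each request is
 * forwarded to a secure-world pseudo-TA in three steps: CMD_SHA_INIT opens
 * a session and binds the request context, CMD_SHA_UPDATE streams data
 * through tee_shm shared memory, and CMD_SHA_FINAL copies the digest back
 * and closes the session.
 */
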
static struct asr_bcm_sha *asr_sha_local = NULL;

static struct teec_uuid pta_sha_uuid = ASR_SHA_ACCESS_UUID;

static int asrbcm_optee_acquire_hash_init(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg)
{
	struct tee_ioctl_invoke_arg invoke_arg;
	struct tee_param params[2];
	int ret = 0;

	ret = asrbcm_optee_open_ta(&ctx->asrbcm_tee_ctx, uuid);
	if (ret != 0)
		return ret;

	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
	invoke_arg.func = cmd;
	invoke_arg.session = ctx->asrbcm_tee_ctx.session;
	invoke_arg.num_params = 2;

	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	params[0].u.value.a = alg;

	/*
	 * The context pointer is passed to the TA as an opaque token so it
	 * can track per-context hash state; the truncating cast assumes a
	 * 32-bit kernel virtual address space.
	 */
	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	params[1].u.value.a = (uint32_t)ctx;

	ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
	if (ret != 0) {
		goto exit;
	} else if (invoke_arg.ret != 0) {
		ret = -EIO;
		goto exit;
	}

	return ret;

exit:
	asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
	return ret;
}

static int asrbcm_optee_acquire_hash_update(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd,
				u32 alg, uint8_t *in, u32 inlen)
{
	struct tee_ioctl_invoke_arg invoke_arg;
	struct tee_param params[2];
	int ret = 0;
	struct tee_shm *shm = NULL;
	u8 *pbuf = NULL;

	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
	invoke_arg.func = cmd;
	invoke_arg.session = ctx->asrbcm_tee_ctx.session;
	invoke_arg.num_params = 2;

	/* tee_shm_alloc() returns an ERR_PTR on failure, not NULL */
	shm = tee_shm_alloc(ctx->asrbcm_tee_ctx.tee_ctx, inlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm)) {
		ret = PTR_ERR(shm);
		goto exit_close;
	}

	pbuf = tee_shm_get_va(shm, 0);
	if (IS_ERR(pbuf)) {
		ret = PTR_ERR(pbuf);
		goto exit_free;
	}
	memcpy(pbuf, in, inlen);

	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	params[0].u.memref.shm_offs = 0;
	params[0].u.memref.size = inlen;
	params[0].u.memref.shm = shm;

	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	params[1].u.value.a = (uint32_t)ctx;

	ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
	if (ret != 0) {
		goto exit_free;
	} else if (invoke_arg.ret != 0) {
		ret = -EIO;
		goto exit_free;
	}

	tee_shm_free(shm);
	return ret;

exit_free:
	tee_shm_free(shm);
exit_close:
	asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
	return ret;
}

static int asrbcm_optee_acquire_hash_final(struct asr_optee_sha_reqctx *ctx, struct teec_uuid *uuid, u32 cmd, u32 alg, u8 *out, u8 outlen)
{
	struct tee_ioctl_invoke_arg invoke_arg;
	struct tee_param params[2];
	int ret = 0;
	struct tee_shm *shm = NULL;
	u8 *pbuf = NULL;

	memset(&invoke_arg, 0x0, sizeof(struct tee_ioctl_invoke_arg));
	invoke_arg.func = cmd;
	invoke_arg.session = ctx->asrbcm_tee_ctx.session;
	invoke_arg.num_params = 2;

	/* tee_shm_alloc() returns an ERR_PTR on failure, not NULL */
	shm = tee_shm_alloc(ctx->asrbcm_tee_ctx.tee_ctx, outlen, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm)) {
		ret = PTR_ERR(shm);
		goto exit_close;
	}

	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
	params[0].u.memref.shm_offs = 0;
	params[0].u.memref.size = outlen;
	params[0].u.memref.shm = shm;

	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	params[1].u.value.a = (uint32_t)ctx;

	ret = tee_client_invoke_func(ctx->asrbcm_tee_ctx.tee_ctx, &invoke_arg, params);
	if (ret != 0) {
		goto exit;
	} else if (invoke_arg.ret != 0) {
		ret = -EIO;
		goto exit;
	}

	pbuf = tee_shm_get_va(shm, 0);
	if (IS_ERR(pbuf)) {
		ret = PTR_ERR(pbuf);
		goto exit;
	}
	memcpy(out, pbuf, outlen);

exit:
	tee_shm_free(shm);
exit_close:
	asrbcm_optee_close_ta(&ctx->asrbcm_tee_ctx);
	return ret;
}

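/*
 * Request queueing: a single request owns the engine at a time
 * (SHA_FLAGS_BUSY). New requests are enqueued under dd->lock; when the
 * engine is idle the next request is dequeued and started via ctx->start(),
 * and any backlogged request is notified with -EINPROGRESS first.
 */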
static int asr_sha_handle_queue(struct asr_bcm_sha *dd,
				struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct asr_sha_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(async_req->tfm);

	dd->req = ahash_request_cast(async_req);
	start_async = (dd->req != req);
	dd->is_async = start_async;
	dd->force_complete = false;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
	struct asr_bcm_sha *dd = ctx->dd;

	ctx->op = op;

	return asr_sha_handle_queue(dd, req);
}

static void asr_sha_copy_ready_hash(struct ahash_request *req)
{
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	if (!req->result)
		return;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_MD5:
		memcpy(req->result, ctx->digest, MD5_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA1:
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA224:
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA256:
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA384:
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA512:
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
		break;
	default:
		return;
	}
}

static inline int asr_sha_complete(struct asr_bcm_sha *dd, int err)
{
	struct ahash_request *req = dd->req;
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	dd->flags &= ~(SHA_FLAGS_BUSY);
	ctx->flags &= ~(SHA_FLAGS_FINAL);

	if ((dd->is_async || dd->force_complete) && req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);

	return err;
}

static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list; a
			 * zero-length sg doesn't necessarily mean the end
			 * of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

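/*
 * The scatterlist data is linearized into a page-backed bounce buffer
 * before being handed to the TEE; buflen is rounded up to whole pages
 * (PAGE_SIZE << get_order(len)), so it may exceed the requested length.
 */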
static int asr_sha_buff_init(struct asr_bcm_sha *dd, uint32_t len)
{
	struct ahash_request *req = dd->req;
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
	if (!ctx->buffer) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	ctx->buflen = PAGE_SIZE << get_order(len);

	return 0;
}

static void asr_sha_buff_cleanup(struct asr_bcm_sha *dd, uint32_t len)
{
	struct ahash_request *req = dd->req;
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	free_pages((unsigned long)ctx->buffer, get_order(len));
	ctx->buflen = 0;
}

static int sha_init_req(struct asr_optee_sha_reqctx *optee_ctx)
{
	int ret = 0;
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	/* hardware: hash init */
	ret = asrbcm_optee_acquire_hash_init(optee_ctx, &pta_sha_uuid,
			CMD_SHA_INIT, ctx->md.alg);
	if (ret)
		return -EINVAL;
	return 0;
}

static int sha_update_req(struct asr_optee_sha_reqctx *optee_ctx)
{
	int ret = 0;
	int bufcnt;
	uint8_t *pdata;
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
	uint32_t buflen = ctx->total;

	ret = asr_sha_buff_init(ctx->dd, ctx->total);
	if (ret)
		return -ENOMEM;

	asr_sha_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	pdata = (uint8_t *)ctx->buffer;

	/* hardware: hash process */
	ret = asrbcm_optee_acquire_hash_update(optee_ctx, &pta_sha_uuid,
			CMD_SHA_UPDATE, ctx->md.alg, pdata, bufcnt);
	if (ret)
		ret = -EINVAL;

	asr_sha_buff_cleanup(ctx->dd, buflen);
	return ret;
}

static void sha_finish_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
{
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(ctx->dd->req);
	uint8_t *hash = (uint8_t *)ctx->digest;
	uint32_t outlen = crypto_ahash_digestsize(tfm);

	if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
		*err = asrbcm_optee_acquire_hash_final(optee_ctx, &pta_sha_uuid, CMD_SHA_FINAL,
				ctx->md.alg, (uint8_t *)hash, outlen);
		ctx->flags &= (~SHA_FLAGS_FINAL);
		asr_sha_copy_ready_hash(ctx->dd->req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}
}

static void sha_next_req(struct asr_optee_sha_reqctx *optee_ctx, int *err)
{
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
		sha_finish_req(optee_ctx, err);

	(void)asr_sha_complete(ctx->dd, *err);
}

static int asr_sha_done(struct asr_bcm_sha *dd);

static int asr_sha_start(struct asr_bcm_sha *dd)
{
	int err = 0;
	struct ahash_request *req = dd->req;
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	mutex_lock(&dd->queue_lock);

	dd->resume = asr_sha_done;

	if ((ctx->flags & SHA_FLAGS_INIT)) {
		err = sha_init_req(optee_ctx);
		ctx->flags &= (~SHA_FLAGS_INIT);
	}

	if (!err) {
		if (ctx->op == SHA_OP_UPDATE) {
			err = sha_update_req(optee_ctx);
			if (!err && (ctx->flags & SHA_FLAGS_FINUP))
				/* no final() after finup() */
				sha_finish_req(optee_ctx, &err);
		} else if (ctx->op == SHA_OP_FINAL) {
			sha_finish_req(optee_ctx, &err);
		}
	}

	if (unlikely(err != -EINPROGRESS))
		/* Task will not finish it, so do it here */
		sha_next_req(optee_ctx, &err);

	mutex_unlock(&dd->queue_lock);
	return err;
}

static int asr_sha_cra_init(struct crypto_tfm *tfm)
{
	struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct asr_optee_sha_reqctx));
	ctx->start = asr_sha_start;

	return 0;
}

static void asr_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static inline void asr_sha_get(struct asr_bcm_sha *dd)
{
	mutex_lock(&dd->sha_lock);
}

static inline void asr_sha_put(struct asr_bcm_sha *dd)
{
	if (mutex_is_locked(&dd->sha_lock))
		mutex_unlock(&dd->sha_lock);
}

static int asr_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
	struct asr_bcm_sha *dd = asr_sha_local;

	asr_sha_get(dd);

	ctx->dd = dd;
	memset(&ctx->md, 0, sizeof(ctx->md));
	ctx->flags = 0;

	/* the request's digest size selects the TEE algorithm */
	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_MD5;
		ctx->md.alg = TEE_ALG_MD5;
		ctx->md.block_size = MD5_HMAC_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->md.alg = TEE_ALG_SHA1;
		ctx->md.block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->md.alg = TEE_ALG_SHA224;
		ctx->md.block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->md.alg = TEE_ALG_SHA256;
		ctx->md.block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->md.alg = TEE_ALG_SHA384;
		ctx->md.block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->md.alg = TEE_ALG_SHA512;
		ctx->md.block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		asr_sha_put(dd);
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->flags |= SHA_FLAGS_INIT;

	asr_sha_put(dd);
	return 0;
}

static int asr_sha_update(struct ahash_request *req)
{
	int ret = 0;
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	asr_sha_get(ctx->dd);
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	ret = asr_sha_enqueue(req, SHA_OP_UPDATE);

	asr_sha_put(ctx->dd);
	return ret;
}

static int asr_sha_final(struct ahash_request *req)
{
	int ret = 0;
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;

	asr_sha_get(ctx->dd);
	ctx->flags |= SHA_FLAGS_FINAL;
	if (ctx->flags & SHA_FLAGS_ERROR) {
		asr_sha_put(ctx->dd);
		return 0; /* uncompleted hash is not needed */
	}
	ret = asr_sha_enqueue(req, SHA_OP_FINAL);

	asr_sha_put(ctx->dd);
	return ret;
}

static int asr_sha_finup(struct ahash_request *req)
{
	struct asr_optee_sha_reqctx *optee_ctx = ahash_request_ctx(req);
	struct asr_sha_reqctx *ctx = &optee_ctx->reqctx;
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = asr_sha_update(req);
	if (err1 == -EINPROGRESS ||
	    (err1 == -EBUSY && (ahash_request_flags(req) &
				CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		asr_sha_put(ctx->dd);
		return err1;
	}
	/*
	 * final() always has to be called to clean up resources
	 * even if update() failed, except when it returned EINPROGRESS.
	 */
	err2 = asr_sha_final(req);

	return err1 ?: err2;
}

static int asr_sha_digest(struct ahash_request *req)
{
	return asr_sha_init(req) ?: asr_sha_finup(req);
}

static int asr_sha_export(struct ahash_request *req, void *out)
{
	const struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int asr_sha_import(struct ahash_request *req, const void *in)
{
	struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

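/*
 * All six transforms share the same ahash callbacks and differ only in
 * digest size, block size and names. .statesize must cover the full
 * asr_optee_sha_reqctx because export()/import() copy the reqctx verbatim.
 */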
static struct ahash_alg sha_algs[] = {
	/* md5 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "asr-md5",
				.cra_priority = ASR_SHA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha1 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "asr-sha1",
				.cra_priority = ASR_SHA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha224 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "asr-sha224",
				.cra_priority = ASR_SHA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha256 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "asr-sha256",
				.cra_priority = ASR_SHA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha384 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "asr-sha384",
				.cra_priority = ASR_SHA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},

	/* sha512 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct asr_optee_sha_reqctx),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "asr-sha512",
				.cra_priority = ASR_SHA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},
};

static void asr_sha_queue_task(unsigned long data)
{
	struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;

	asr_sha_handle_queue(dd, NULL);
}

static int asr_sha_done(struct asr_bcm_sha *dd)
{
	int err = 0;
	struct ahash_request *req = dd->req;
	struct asr_optee_sha_reqctx *ctx = ahash_request_ctx(req);

	/* sha_finish_req() dereferences its err argument, so pass a real int */
	sha_finish_req(ctx, &err);

	return err;
}

static void asr_sha_done_task(unsigned long data)
{
	struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

static int hash_handle(int alg, uint8_t *in, uint32_t inlen, uint8_t *out)
{
	int ret = 0;
	uint32_t outlen;
	struct asr_optee_sha_reqctx ctx;

	switch (alg) {
	case TEE_ALG_SHA512:
		outlen = HASH_LEN_SHA512;
		break;
	case TEE_ALG_SHA384:
		outlen = HASH_LEN_SHA384;
		break;
	case TEE_ALG_SHA256:
		outlen = HASH_LEN_SHA256;
		break;
	case TEE_ALG_SHA224:
		outlen = HASH_LEN_SHA224;
		break;
	case TEE_ALG_SHA1:
		outlen = HASH_LEN_SHA1;
		break;
	case TEE_ALG_MD5:
		outlen = HASH_LEN_MD5;
		break;
	default:
		printk("err: unsupported hash alg\n");
		ret = -EINVAL;
		goto exit;
	}

	ret = asrbcm_optee_acquire_hash_init(&ctx, &pta_sha_uuid, CMD_SHA_INIT, alg);
	if (ret)
		goto exit;

	ret = asrbcm_optee_acquire_hash_update(&ctx, &pta_sha_uuid, CMD_SHA_UPDATE, alg, in, inlen);
	if (ret)
		goto exit;

	ret = asrbcm_optee_acquire_hash_final(&ctx, &pta_sha_uuid, CMD_SHA_FINAL, alg, out, outlen);

exit:
	return ret;
}

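/*
 * Registration-time known-answer test: hash "abc" with SHA-256 through the
 * TEE and compare against the published reference digest. If the secure
 * world result is wrong, the algorithms are not registered at all.
 */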
static int tee_hwhash_func_verify(void)
{
	int ret = 0;
	unsigned char out_sha256[32] = {0};
	const struct {
		const char *msg;
		uint8_t hash[32];
	} sha256_tests = {
		"abc",
		{ 0xBA, 0x78, 0x16, 0xBF, 0x8F, 0x01,
		  0xCF, 0xEA, 0x41, 0x41, 0x40, 0xDE,
		  0x5D, 0xAE, 0x22, 0x23, 0xB0, 0x03,
		  0x61, 0xA3, 0x96, 0x17, 0x7A, 0x9C,
		  0xB4, 0x10, 0xFF, 0x61, 0xF2, 0x00,
		  0x15, 0xAD
		}
	};

	ret = hash_handle(TEE_ALG_SHA256, (uint8_t *)sha256_tests.msg, strlen(sha256_tests.msg), out_sha256);
	if (ret)
		return ret;

	if (memcmp(out_sha256, sha256_tests.hash, sizeof(out_sha256)))
		return -1;

	return 0;
}

// #define ASR_BCM_SHA_TEST

#ifdef ASR_BCM_SHA_TEST
static int bcm_sha_test(void);
#endif

int asr_bcm_sha_register(struct asr_bcm_dev *bcm_dd)
{
	int err, i, j;
	struct asr_bcm_sha *sha_dd;

	sha_dd = &bcm_dd->asr_sha;
	sha_dd->dev = bcm_dd->dev;

	asr_sha_local = sha_dd;

	spin_lock_init(&sha_dd->lock);
	mutex_init(&sha_dd->sha_lock);
	mutex_init(&sha_dd->queue_lock);
	tasklet_init(&sha_dd->done_task, asr_sha_done_task,
			(unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
			(unsigned long)sha_dd);
	crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);

	/* don't register the sha algs if the hash verify fails in the TOS */
	err = tee_hwhash_func_verify();
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		err = crypto_register_ahash(&sha_algs[i]);
		if (err)
			goto err_sha_algs;
	}

#ifdef ASR_BCM_SHA_TEST
	bcm_sha_test();
#endif

	return 0;

err_sha_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_algs[j]);

	return err;
}
EXPORT_SYMBOL_GPL(asr_bcm_sha_register);

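/*
 * Consumer sketch (illustrative only, not part of this driver): once
 * registered, these transforms are reached through the generic ahash API,
 * e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, NULL);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS (async)
 *
 * The crypto core selects "asr-sha256" when ASR_SHA_PRIORITY outranks
 * other sha256 providers.
 */
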
int asr_bcm_sha_unregister(struct asr_bcm_dev *bcm_dd)
{
	int i;
	struct asr_bcm_sha *sha_dd = &bcm_dd->asr_sha;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
		crypto_unregister_ahash(&sha_algs[i]);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	return 0;
}
EXPORT_SYMBOL_GPL(asr_bcm_sha_unregister);

#ifdef ASR_BCM_SHA_TEST

static int bcm_sha_test(void)
{
	int ret = 0;

	const struct {
		const char *msg;
		uint8_t hash[20];
	} sha1_tests[] = {
		{
			"abc",
			{ 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06,
			  0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
			  0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
			  0xd8, 0x9d
			}
		},
		{
			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs"
			"fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl"
			"nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn"
			"kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjo",
			{
			  0x93, 0x84, 0x7f, 0x98, 0x22, 0x5e,
			  0x6d, 0xf2, 0x09, 0x1c, 0xc9, 0xac,
			  0xbb, 0x5d, 0x00, 0x2d, 0x64, 0x81,
			  0xe3, 0xcd
			}
		},
		{
			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs"
			"fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl"
			"nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn"
			"kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfopojpfjoewiroiowiod",
			{
			  0x6a, 0x66, 0xc2, 0x87, 0x84, 0x36,
			  0x14, 0x90, 0x99, 0x03, 0x90, 0xf0,
			  0xaa, 0x7e, 0xbd, 0xc7, 0xdb, 0x38,
			  0x54, 0x09
			}
		},
		{
			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
			"jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
			"nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
			"gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
			"jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
			"oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
			"djkisijdknknkskdnknflnnesniewinoinknmdn"
			"kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
			"lskldklklklnmlflmlmlfmlfml",
			{
			  0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
			  0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
			  0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
			  0x13, 0x91
			}
		}
	};

	struct asr_optee_sha_reqctx ctx1;
	struct asr_optee_sha_reqctx ctx2;
	struct asr_optee_sha_reqctx ctx3;
	struct asr_optee_sha_reqctx ctx4;
	unsigned char out_sha1_1[20] = {0};
	unsigned char out_sha1_2[20] = {0};
	unsigned char out_sha1_3[20] = {0};
	unsigned char out_sha1_4[20] = {0};

	/*
	 * The four contexts are deliberately interleaved to verify that
	 * the TA keeps per-context hash state separate.
	 */
	ret = asrbcm_optee_acquire_hash_init(&ctx1, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_init(&ctx2, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_update(&ctx1, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
			(uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_init(&ctx3, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
	if (ret)
		return ret;

	/* test 1 hashes only the tail of the message (from offset 10) */
	ret = asrbcm_optee_acquire_hash_update(&ctx2, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
			(uint8_t *)sha1_tests[1].msg + 10, strlen(sha1_tests[1].msg) - 10);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_final(&ctx1, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
			out_sha1_1, sizeof(out_sha1_1));
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
			(uint8_t *)sha1_tests[2].msg, 25);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_init(&ctx4, &pta_sha_uuid, CMD_SHA_INIT, TEE_ALG_SHA1);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_final(&ctx2, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
			out_sha1_2, sizeof(out_sha1_2));
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_update(&ctx3, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
			(uint8_t *)sha1_tests[2].msg + 25, strlen(sha1_tests[2].msg) - 25);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_final(&ctx3, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
			out_sha1_3, sizeof(out_sha1_3));
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
			(uint8_t *)sha1_tests[3].msg, 43);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_update(&ctx4, &pta_sha_uuid, CMD_SHA_UPDATE, TEE_ALG_SHA1,
			(uint8_t *)sha1_tests[3].msg + 43, strlen(sha1_tests[3].msg) - 43);
	if (ret)
		return ret;

	ret = asrbcm_optee_acquire_hash_final(&ctx4, &pta_sha_uuid, CMD_SHA_FINAL, TEE_ALG_SHA1,
			out_sha1_4, sizeof(out_sha1_4));
	if (ret)
		return ret;

	if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1)))
		printk("sha1 test 0 failed\n");
	else
		printk("sha1 test 0 pass\n");
	if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2)))
		printk("sha1 test 1 failed\n");
	else
		printk("sha1 test 1 pass\n");
	if (memcmp(out_sha1_3, sha1_tests[2].hash, sizeof(out_sha1_3)))
		printk("sha1 test 2 failed\n");
	else
		printk("sha1 test 2 pass\n");
	if (memcmp(out_sha1_4, sha1_tests[3].hash, sizeof(out_sha1_4)))
		printk("sha1 test 3 failed\n");
	else
		printk("sha1 test 3 pass\n");

	return 0;
}
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
MODULE_DESCRIPTION("ASR bcm sha driver");