#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <asm/cacheflush.h>
#include "asr-te200.h"
#include "asr-sha.h"

// #define ASR_TE200_SHA_TEST

static struct asr_te200_sha *asr_sha_local = NULL;
static struct mutex hash_lock = __MUTEX_INITIALIZER(hash_lock);

static inline u32 asr_sha_read(struct asr_te200_sha *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void asr_sha_write(struct asr_te200_sha *dd,
				 u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

/* ------- te200 sha hardware operation -------- */
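/*
 * Programming model, as implemented below: write a command word into
 * TE200_SHASH_QUEUE, optionally followed by physical-address and length
 * parameters, set HASH_RUN in TE200_SHASH_CTRL, then poll
 * TE200_SHASH_INTR_STAT until HASH_CMD_INTR (or an error bit) is raised.
 */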
static int hash_clock_switch(struct asr_te200_sha *dd, int enable)
{
	uint32_t value;

	value = asr_sha_read(dd, TE200_CLOCK_CTRL);
	if (enable)
		value |= HASH_CLK_EN;
	else
		value &= ~HASH_CLK_EN;

	asr_sha_write(dd, TE200_CLOCK_CTRL, value);

	return 0;
}

static int hash_start_run(struct asr_te200_sha *dd)
{
	uint32_t value;

	value = asr_sha_read(dd, TE200_SHASH_CTRL);
	value |= HASH_RUN;
	asr_sha_write(dd, TE200_SHASH_CTRL, value);
	return 0;
}

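/*
 * Busy-wait for command completion. The 500-jiffies budget is an
 * arbitrary driver timeout (HZ-dependent), not a hardware requirement.
 */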
static int hash_wait_intr(struct asr_te200_sha *dd)
{
	int ret = 0;
	uint32_t value;
	unsigned long time_start = jiffies;

	while (1) {
		value = asr_sha_read(dd, TE200_SHASH_INTR_STAT);

		if (value & HASH_INVALID_CMD) {
			dev_err(dd->dev, "invalid cmd\n");
			ret = -1;
			break;
		}

		if (value & HASH_BUS_ERROR) {
			dev_err(dd->dev, "bus error\n");
			ret = -1;
			break;
		}

		if (time_after(jiffies, time_start + 500)) {
			dev_err(dd->dev, "wait intr timeout!\n");
			ret = -1;
			break;
		}

		if (value & HASH_CMD_INTR)
			break;
	}

	/* Clear the command-completion status bit. */
	value = asr_sha_read(dd, TE200_SHASH_INTR_STAT);
	value |= HASH_CMD_INTR;
	asr_sha_write(dd, TE200_SHASH_INTR_STAT, value);
	return ret;
}

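/*
 * The engine is handed physical addresses and accesses DRAM directly, so
 * buffers must be cleaned/invalidated out of the CPU data cache around
 * each transfer. __cpuc_flush_dcache_area() is the ARM32 primitive used
 * here (flush == clean + invalidate).
 */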
static inline void sha_cache_operation(void *addr, int size)
{
	__cpuc_flush_dcache_area(addr, size);
}

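/*
 * HASH_INIT with HASH_SET_EXT_IV reloads a previously saved intermediate
 * digest (ext_iv) and, when ctx->total_bits_num is non-zero, the running
 * bit count as well, so a suspended session can resume on the hardware.
 */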
static int _hash_op_init(struct asr_sha_reqctx *reqctx, int alg, uint8_t *ext_iv)
{
	int ret;
	uint32_t cmd = 0;
	uint32_t ext_iv_phys;
	struct asr_te200_sha *dd = reqctx->dd;
	te200_hash_context_t *ctx = &reqctx->hash_ctx;

	hash_clock_switch(dd, 1);

	if (ext_iv) {
		cmd |= HASH_INIT_CMD | HASH_SET_EXT_IV | HASH_PARAM_IS_ADDR | HASH_INTER_TRIGGERD;
		/* Set initial length */
		if (ctx->total_bits_num != 0)
			cmd |= 0x4;
	} else {
		cmd |= HASH_INIT_CMD | HASH_PARAM_IS_ADDR | HASH_INTER_TRIGGERD;
	}

	switch (alg) {
	case HASH_SHA1:
		cmd |= HASH_MODE_SHA1;
		break;
	case HASH_SHA224:
		cmd |= HASH_MODE_SHA224;
		break;
	case HASH_SHA256:
		cmd |= HASH_MODE_SHA256;
		break;
	default:
		hash_clock_switch(dd, 0);
		return -EINVAL;
	}

	asr_sha_write(dd, TE200_SHASH_QUEUE, cmd);
	if (ext_iv) {
		ext_iv_phys = (uint32_t)virt_to_phys((void *)ext_iv);
		sha_cache_operation((void *)ext_iv, 32);
		asr_sha_write(dd, TE200_SHASH_QUEUE, ext_iv_phys);
		/* Set the total HASH bit length: the 64-bit count is split
		 * into two 32-bit words */
		if (ctx->total_bits_num != 0) {
			asr_sha_write(dd, TE200_SHASH_QUEUE, (ctx->total_bits_num & 0xFFFFFFFF));
			asr_sha_write(dd, TE200_SHASH_QUEUE, (ctx->total_bits_num >> 32));
		}
	}

	hash_start_run(dd);
	ret = hash_wait_intr(dd);
	reqctx->hash_ctx.finish_flag = 1;

	hash_clock_switch(dd, 0);
	return ret;
}

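/*
 * Data is streamed to the engine in HASH_BUF_LEN-sized units: leftover
 * bytes from the previous call (ctx->extra_data) are flushed first, the
 * bulk of the new input is processed directly, and any tail shorter than
 * HASH_BUF_LEN is stashed in ctx->extra_data for the next call or for
 * final padding.
 */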
static int _hash_op_proc(struct asr_sha_reqctx *reqctx, const uint8_t *src, size_t size)
{
	int ret = 0;
	uint32_t cmd = 0;
	uint32_t src_phys;
	struct asr_te200_sha *dd = reqctx->dd;
	te200_hash_context_t *ctx = &reqctx->hash_ctx;
	size_t input_data_len = 0;
	uint32_t old_extra_len = ctx->count;

	hash_clock_switch(dd, 1);

	/* Extra data bytes number */
	ctx->count = (size + old_extra_len) % HASH_BUF_LEN;
	if (size + old_extra_len >= HASH_BUF_LEN) {
		/* First handle the old extra data, then the new input data */
		if (old_extra_len != 0) {
			src_phys = (uint32_t)virt_to_phys((void *)ctx->extra_data);
			sha_cache_operation((void *)ctx->extra_data, old_extra_len);

			cmd = HASH_PROCESS_CMD | HASH_INTER_TRIGGERD;
			asr_sha_write(dd, TE200_SHASH_QUEUE, cmd);

			asr_sha_write(dd, TE200_SHASH_QUEUE, src_phys);
			asr_sha_write(dd, TE200_SHASH_QUEUE, old_extra_len);

			hash_start_run(dd);
			ret = hash_wait_intr(dd);
			if (ret)
				goto err;
			ctx->total_bits_num += old_extra_len * 8;
		}

		cmd = HASH_PROCESS_CMD | HASH_INTER_TRIGGERD;
		input_data_len = size - ctx->count;

		src_phys = virt_to_phys((void *)src);
		sha_cache_operation((void *)src, input_data_len);
		asr_sha_write(dd, TE200_SHASH_QUEUE, cmd);
		asr_sha_write(dd, TE200_SHASH_QUEUE, (uint32_t)src_phys);
		asr_sha_write(dd, TE200_SHASH_QUEUE, input_data_len);

		hash_start_run(dd);
		ret = hash_wait_intr(dd);
		if (ret)
			goto err;

		/* Total data bits number */
		ctx->total_bits_num += input_data_len * 8;
		/* Save the new extra data */
		memset(ctx->extra_data, 0, sizeof(ctx->extra_data));
		memcpy(ctx->extra_data, (src + size - ctx->count), ctx->count);
	} else {
		/* size + old_extra_len < HASH_BUF_LEN:
		 * just save the input data and return */
		memcpy(ctx->extra_data + old_extra_len, src, size);
	}
	ret = 0;

err:
	hash_clock_switch(dd, 0);
	return ret;
}

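/*
 * padding == 0: suspend the session; the engine writes its intermediate
 * digest to "out" without appending SHA padding, so the state can later
 * be restored through _hash_op_init(..., ext_iv).
 * padding == 1: flush any buffered bytes, then finish with standard
 * padding to produce the final digest.
 */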
static int _hash_op_finish(struct asr_sha_reqctx *reqctx,
			   uint8_t *out, uint32_t out_size, int padding)
{
	int ret = 0;
	uint32_t cmd = 0;
	uint32_t out_phys;
	struct asr_te200_sha *dd = reqctx->dd;
	te200_hash_context_t *ctx = &reqctx->hash_ctx;
	uint32_t extra_data_phys;

	/* Filter out finish requests on uninitialized contexts */
	if (!reqctx->hash_ctx.finish_flag)
		return ret;

	hash_clock_switch(dd, 1);

	if (padding == 0) {
		cmd = HASH_FINISH_CMD | HASH_INTER_TRIGGERD;
		ctx->hash_temp_valid = 1;
		ctx->finish_flag = 0;
	} else {
		/* If the extra data count is not zero, execute a HASH process
		 * command first */
		if (ctx->count != 0) {
			cmd = HASH_PROCESS_CMD | HASH_INTER_TRIGGERD;
			asr_sha_write(dd, TE200_SHASH_QUEUE, cmd);

			extra_data_phys = (uint32_t)virt_to_phys((void *)ctx->extra_data);
			sha_cache_operation((void *)ctx->extra_data, ctx->count);
			asr_sha_write(dd, TE200_SHASH_QUEUE, extra_data_phys);
			asr_sha_write(dd, TE200_SHASH_QUEUE, ctx->count);
			hash_start_run(dd);
			ret = hash_wait_intr(dd);
			if (ret)
				goto err;
		}
		cmd = HASH_FINISH_CMD | HASH_PADDING | HASH_INTER_TRIGGERD;
	}

	out_phys = virt_to_phys((void *)out);
	sha_cache_operation((void *)out, out_size);

	asr_sha_write(dd, TE200_SHASH_QUEUE, cmd);
	asr_sha_write(dd, TE200_SHASH_QUEUE, (uint32_t)out_phys);

	hash_start_run(dd);
	ret = hash_wait_intr(dd);
	if (ret)
		goto err;

	ret = 0;
err:
	hash_clock_switch(dd, 0);
	return ret;
}

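/*
 * The engine holds only one hash context at a time. _g_sha_ctx tracks
 * which software session currently owns the hardware; hash_op_init/
 * hash_op_proc/hash_op_finish transparently swap sessions by saving the
 * owner's intermediate digest (finish without padding) and restoring the
 * requested session (init with external IV). All of this runs under
 * hash_lock.
 */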
static struct asr_sha_reqctx *_g_sha_ctx = NULL;

#define GET_HASH_LEN(reqctx) \
	((reqctx)->hash_ctx.alg == HASH_SHA1   ? 20 : \
	 (reqctx)->hash_ctx.alg == HASH_SHA224 ? 28 : \
	 (reqctx)->hash_ctx.alg == HASH_SHA256 ? 32 : 0)

static int hash_op_init(struct asr_sha_reqctx *reqctx, int alg)
{
	int ret = 0;
	unsigned char garbage[64] = {0};
	uint32_t hash_temp_len;

	mutex_lock(&hash_lock);

	if (_g_sha_ctx != reqctx) {
		/* First finish the old session (_g_sha_ctx), then load the
		 * new session (reqctx) */
		if (_g_sha_ctx != NULL) {
			hash_temp_len = GET_HASH_LEN(_g_sha_ctx);
			if (hash_temp_len == 0) {
				ret = -1;
				goto exit;
			}
			ret = _hash_op_finish(_g_sha_ctx, _g_sha_ctx->hash_ctx.hash_temp, hash_temp_len, 0);
			_g_sha_ctx = NULL;
			if (ret) {
				printk("failed to swap out the previous context\n");
				goto exit;
			}
		}
	} else {
		/*
		 * This session is being restarted, so flush the stale state.
		 * Before executing the finish command, check that the finish
		 * flag is set; if it is not, there is no need to execute the
		 * finish command.
		 */
		if (_g_sha_ctx != NULL) {
			hash_temp_len = GET_HASH_LEN(_g_sha_ctx);
			if (hash_temp_len == 0) {
				ret = -1;
				goto exit;
			}
			ret = _hash_op_finish(_g_sha_ctx, garbage, hash_temp_len, 1);
			_g_sha_ctx = NULL;
			if (ret) {
				printk("hash finish error during context switch!\n");
				goto exit;
			}
		}
	}

	memset(&reqctx->hash_ctx, 0, sizeof(reqctx->hash_ctx));
	reqctx->hash_ctx.alg = alg;
	ret = _hash_op_init(reqctx, alg, NULL);
	if (ret) {
		printk("hash init command failed in te200 hash init\n");
		goto exit;
	}

	_g_sha_ctx = reqctx;
	ret = 0;

exit:
	mutex_unlock(&hash_lock);
	return ret;
}

static int hash_op_proc(struct asr_sha_reqctx *reqctx, const uint8_t *src, size_t size)
{
	int ret = 0;
	uint32_t hash_temp_len;

	mutex_lock(&hash_lock);

	if (reqctx == NULL) {
		ret = -1;
		goto exit;
	}

	/* Multi-session handling */
	if (_g_sha_ctx != reqctx) {
		/* First finish the old session (_g_sha_ctx), then load the
		 * new session (reqctx) */
		if (_g_sha_ctx != NULL) {
			hash_temp_len = GET_HASH_LEN(_g_sha_ctx);
			if (hash_temp_len == 0) {
				ret = -1;
				goto exit;
			}
			ret = _hash_op_finish(_g_sha_ctx, _g_sha_ctx->hash_ctx.hash_temp, hash_temp_len, 0);
			_g_sha_ctx = NULL;
			if (ret) {
				printk("hash finish error during context switch!\n");
				goto exit;
			}
		}

		/* Re-initialize: execute the te200 HASH_init command and load
		 * the intermediate hash data, if any */
		if (reqctx->hash_ctx.hash_temp_valid == 1)
			ret = _hash_op_init(reqctx, reqctx->hash_ctx.alg, reqctx->hash_ctx.hash_temp);
		else
			ret = _hash_op_init(reqctx, reqctx->hash_ctx.alg, NULL);
		if (ret != 0) {
			printk("hash init command failed in update, reason: %x\n", ret);
			goto exit;
		}
		_g_sha_ctx = reqctx;
	}

	/* Execute the te200 HASH_process command */
	ret = _hash_op_proc(reqctx, src, size);
	if (ret != 0) {
		printk("hash process command failed in update, reason: %x\n", ret);
		goto exit;
	}

	ret = 0;

exit:
	mutex_unlock(&hash_lock);
	return ret;
}

static int hash_op_finish(struct asr_sha_reqctx *reqctx, uint8_t *out, uint32_t out_size)
{
	int ret = 0;
	uint32_t hash_temp_len;

	mutex_lock(&hash_lock);

	if ((reqctx == NULL) || (out == NULL)) {
		printk("context not initialized!\n");
		ret = -1;
		goto exit;
	}

	if (_g_sha_ctx == reqctx) {
		/* Even if hash finish is requested right after _hash_op_init,
		 * a default hash output should still be produced */
		if (!reqctx->hash_ctx.finish_flag) {
			if (reqctx->hash_ctx.hash_temp_valid == 1)
				ret = _hash_op_init(reqctx, reqctx->hash_ctx.alg, reqctx->hash_ctx.hash_temp);
			else
				ret = _hash_op_init(reqctx, reqctx->hash_ctx.alg, NULL);
			if (ret != 0) {
				printk("hash init command failed in finish, reason: %x\n", ret);
				goto exit;
			}
		}

		ret = _hash_op_finish(reqctx, out, out_size, 1);
	} else {
		/* Before finishing a session, check its finish flag first; if
		 * it is not set, the session does not need to be finished */
		if (_g_sha_ctx != NULL) {
			/* Save the current session, then load the new one */
			hash_temp_len = GET_HASH_LEN(_g_sha_ctx);
			if (hash_temp_len == 0) {
				ret = -1;
				goto exit;
			}
			ret = _hash_op_finish(_g_sha_ctx, _g_sha_ctx->hash_ctx.hash_temp, hash_temp_len, 0);
			_g_sha_ctx = NULL;
			if (ret != 0) {
				printk("hash finish error during context switch!\n");
				goto exit;
			}
		}

		if (reqctx->hash_ctx.hash_temp_valid == 1)
			ret = _hash_op_init(reqctx, reqctx->hash_ctx.alg, reqctx->hash_ctx.hash_temp);
		else
			ret = _hash_op_init(reqctx, reqctx->hash_ctx.alg, NULL);
		if (ret != 0) {
			printk("hash init command failed in finish, reason: %x\n", ret);
			goto exit;
		}

		_g_sha_ctx = reqctx;
		ret = _hash_op_finish(reqctx, out, out_size, 1);
	}

	_g_sha_ctx = NULL;

exit:
	mutex_unlock(&hash_lock);
	return ret;
}

int asr_te200_hash_init(struct asr_sha_reqctx *reqctx, int alg)
{
	reqctx->dd = asr_sha_local;

	if (!reqctx->dd)
		return -1;

	return hash_op_init(reqctx, alg);
}

int asr_te200_hash_proc(struct asr_sha_reqctx *reqctx, const uint8_t *src, size_t size)
{
	int ret;
	uint8_t *psrc;

	reqctx->dd = asr_sha_local;
	if (!reqctx->dd)
		return -1;

	/* Bounce the caller's buffer through kmalloc memory so it is
	 * physically contiguous and safe to hand to the engine */
	psrc = kmalloc(size, GFP_KERNEL);
	if (!psrc)
		return -ENOMEM;
	memcpy(psrc, src, size);

	ret = hash_op_proc(reqctx, psrc, size);
	kfree(psrc);

	return ret;
}

int asr_te200_hash_finish(struct asr_sha_reqctx *reqctx, uint8_t *out, uint32_t out_size)
{
	int ret;
	/* Cache-line-aligned bounce buffer: avoids cache coherence problems
	 * if the caller's "out" buffer shares cache lines with other data */
	uint8_t hash[64] __aligned(64) = {0};

	reqctx->dd = asr_sha_local;
	if (!reqctx->dd)
		return -1;

	if (out_size > sizeof(hash))
		return -1;

	ret = hash_op_finish(reqctx, hash, out_size);
	memcpy(out, hash, out_size);

	return ret;
}
/* ------- end -------- */

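/*
 * Copy pending request data out of the scatterlist into the context's
 * linear buffer, advancing sg/offset/total as it goes. Zero-length sg
 * entries are skipped, since they do not necessarily terminate the list.
 */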
static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * count <= 0 either because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check whether there is another sg in the list; a
			 * zero-length sg doesn't necessarily mean the end of
			 * the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int asr_sha_done(struct asr_te200_sha *dd);

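/*
 * Standard crypto-queue dispatch: enqueue the request, and if the engine
 * is idle pull the next request (completing any backlogged one with
 * -EINPROGRESS) and kick its ctx->start() handler.
 */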
static int asr_sha_handle_queue(struct asr_te200_sha *dd,
				struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct asr_sha_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(async_req->tfm);

	dd->req = ahash_request_cast(async_req);
	start_async = (dd->req != req);
	dd->is_async = start_async;
	dd->force_complete = false;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
	struct asr_te200_sha *dd = ctx->dd;

	ctx->op = op;

	return asr_sha_handle_queue(dd, req);
}

static void asr_sha_copy_ready_hash(struct ahash_request *req)
{
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA224:
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
		break;
	case SHA_FLAGS_SHA256:
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
		break;
	default:
		return;
	}
}

static inline int asr_sha_complete(struct asr_te200_sha *dd, int err)
{
	struct ahash_request *req = dd->req;
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	dd->flags &= ~(SHA_FLAGS_BUSY);
	ctx->flags &= ~(SHA_FLAGS_FINAL);

	if ((dd->is_async || dd->force_complete) && req->base.complete)
		req->base.complete(&req->base, err);

	/* Handle new request */
	tasklet_schedule(&dd->queue_task);

	return err;
}

static int asr_sha_buff_init(struct asr_te200_sha *dd, uint32_t len)
{
	struct ahash_request *req = dd->req;
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
	if (!ctx->buffer) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	ctx->buflen = PAGE_SIZE << get_order(len);

	return 0;
}

static void asr_sha_buff_cleanup(struct asr_te200_sha *dd, uint32_t len)
{
	struct ahash_request *req = dd->req;
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	free_pages((unsigned long)ctx->buffer, get_order(len));
	ctx->buflen = 0;
}

static int sha_init_req(struct asr_sha_reqctx *ctx)
{
	int ret = 0;

	/* hardware: hash init */
	ret = hash_op_init(ctx, ctx->alg);
	if (ret)
		return -EINVAL;
	return 0;
}

static int sha_update_req(struct asr_sha_reqctx *ctx)
{
	int ret = 0;
	int bufcnt;
	uint32_t buflen = ctx->total;

	ret = asr_sha_buff_init(ctx->dd, ctx->total);
	if (ret)
		return -ENOMEM;

	asr_sha_append_sg(ctx);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	/* hardware: hash process */
	ret = hash_op_proc(ctx, ctx->buffer, bufcnt);
	if (ret)
		ret = -EINVAL;

	asr_sha_buff_cleanup(ctx->dd, buflen);
	return ret;
}

static void sha_finish_req(struct asr_sha_reqctx *ctx, int *err)
{
	uint8_t *hash = (uint8_t *)ctx->digest;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(ctx->dd->req);
	uint32_t hash_size = crypto_ahash_digestsize(tfm);

	if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
		*err = hash_op_finish(ctx, (uint8_t *)hash, hash_size);
		asr_sha_copy_ready_hash(ctx->dd->req);
		ctx->flags &= (~SHA_FLAGS_FINAL);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}
}

static void sha_next_req(struct asr_sha_reqctx *ctx, int *err)
{
	if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
		sha_finish_req(ctx, err);

	(void)asr_sha_complete(ctx->dd, *err);
}

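/*
 * Request state machine: a deferred INIT (flagged by asr_sha_init) is
 * executed first, then the queued operation (update or final); FINUP
 * folds the final step into the update path.
 */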
static int asr_sha_start(struct asr_te200_sha *dd)
{
	int err = 0;
	struct ahash_request *req = dd->req;
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
	struct asr_te200_dev *te200_dd = dev_get_drvdata(dd->dev);
	struct asr_te200_ops *te200_ops = te200_dd->te200_ops;

	te200_ops->dev_get(te200_dd);

	dd->resume = asr_sha_done;

	if ((ctx->flags & SHA_FLAGS_INIT)) {
		err = sha_init_req(ctx);
		ctx->flags &= (~SHA_FLAGS_INIT);
		if (err) {
			te200_ops->dev_put(te200_dd);
			return err;
		}
	}

	if (ctx->op == SHA_OP_UPDATE) {
		err = sha_update_req(ctx);
		if (!err && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			sha_finish_req(ctx, &err);
	} else if (ctx->op == SHA_OP_FINAL) {
		sha_finish_req(ctx, &err);
	}

	if (unlikely(err != -EINPROGRESS)) {
		/* The task will not finish it, so do it here */
		sha_next_req(ctx, &err);
	}

	te200_ops->dev_put(te200_dd);
	return err;
}

static int asr_sha_cra_init(struct crypto_tfm *tfm)
{
	struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct asr_sha_reqctx));
	ctx->start = asr_sha_start;

	return 0;
}

static void asr_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static inline void asr_sha_get(struct asr_te200_sha *dd)
{
	mutex_lock(&dd->sha_lock);
}

/* Note: only unlocks if the mutex is currently held; some paths (e.g.
 * finup after update) call this without holding the lock */
static inline void asr_sha_put(struct asr_te200_sha *dd)
{
	if (mutex_is_locked(&dd->sha_lock))
		mutex_unlock(&dd->sha_lock);
}

static int asr_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
	struct asr_te200_sha *dd = asr_sha_local;

	asr_sha_get(dd);

	ctx->dd = dd;
	ctx->flags = 0;
	ctx->alg = 0;

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->alg = HASH_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->alg = HASH_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->alg = HASH_SHA256;
		break;
	default:
		asr_sha_put(dd);
		return -EINVAL;
	}

	ctx->bufcnt = 0;

	ctx->flags |= SHA_FLAGS_INIT;

	asr_sha_put(dd);
	return 0;
}

static int asr_sha_update(struct ahash_request *req)
{
	int ret = 0;
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	asr_sha_get(ctx->dd);

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	ret = asr_sha_enqueue(req, SHA_OP_UPDATE);

	asr_sha_put(ctx->dd);
	return ret;
}

static int asr_sha_final(struct ahash_request *req)
{
	int ret = 0;
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	asr_sha_get(ctx->dd);

	ctx->flags |= SHA_FLAGS_FINAL;
	if (ctx->flags & SHA_FLAGS_ERROR) {
		asr_sha_put(ctx->dd);
		return 0; /* an uncompleted hash is not needed */
	}
	ret = asr_sha_enqueue(req, SHA_OP_FINAL);

	asr_sha_put(ctx->dd);
	return ret;
}

static int asr_sha_finup(struct ahash_request *req)
{
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = asr_sha_update(req);
	if (err1 == -EINPROGRESS ||
	    (err1 == -EBUSY && (ahash_request_flags(req) &
				CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		asr_sha_put(ctx->dd);
		return err1;
	}
	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed, except when the result is EINPROGRESS
	 */
	err2 = asr_sha_final(req);

	return err1 ?: err2;
}

static int asr_sha_digest(struct ahash_request *req)
{
	return asr_sha_init(req) ?: asr_sha_finup(req);
}

static int asr_sha_export(struct ahash_request *req, void *out)
{
	const struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int asr_sha_import(struct ahash_request *req, const void *in)
{
	struct asr_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static struct ahash_alg sha_algs[] = {
	/* sha1 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct asr_sha_reqctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "asr-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},
	/* sha224 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct asr_sha_reqctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "asr-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},
	/* sha256 */
	{
		.init = asr_sha_init,
		.update = asr_sha_update,
		.final = asr_sha_final,
		.finup = asr_sha_finup,
		.digest = asr_sha_digest,
		.export = asr_sha_export,
		.import = asr_sha_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct asr_sha_reqctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "asr-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct asr_sha_ctx),
				.cra_alignmask = 0,
				.cra_module = THIS_MODULE,
				.cra_init = asr_sha_cra_init,
				.cra_exit = asr_sha_cra_exit,
			}
		}
	},
};
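
/*
 * A minimal kernel-side usage sketch (illustrative only, not part of this
 * driver): in-kernel users reach these implementations through the generic
 * ahash API, e.g. for "sha256". Error handling is elided and done_cb is a
 * caller-supplied crypto_completion_t callback.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */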

static void asr_sha_queue_task(unsigned long data)
{
	struct asr_te200_sha *dd = (struct asr_te200_sha *)data;

	asr_sha_handle_queue(dd, NULL);
}

static int asr_sha_done(struct asr_te200_sha *dd)
{
	int err = 0;

	if (SHA_FLAGS_OUTPUT_READY & dd->flags)
		dd->flags &= ~SHA_FLAGS_OUTPUT_READY;

	return err;
}

static void asr_sha_done_task(unsigned long data)
{
	struct asr_te200_sha *dd = (struct asr_te200_sha *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

#ifdef ASR_TE200_SHA_TEST
static int te200_sha_test(struct asr_te200_sha *dd);
#endif

int asr_te200_sha_register(struct asr_te200_dev *te200_dd)
{
	int err, i, j;
	struct asr_te200_sha *sha_dd;

	sha_dd = &te200_dd->asr_sha;

	sha_dd->dev = te200_dd->dev;
	sha_dd->io_base = te200_dd->io_base;
	sha_dd->phys_base = te200_dd->phys_base;

	asr_sha_local = sha_dd;

	spin_lock_init(&sha_dd->lock);
	mutex_init(&sha_dd->sha_lock);
	tasklet_init(&sha_dd->done_task, asr_sha_done_task,
		     (unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
		     (unsigned long)sha_dd);
	crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		err = crypto_register_ahash(&sha_algs[i]);
		if (err)
			goto err_sha_algs;
	}

#ifdef ASR_TE200_SHA_TEST
	te200_sha_test(sha_dd);
#endif

	return 0;

err_sha_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_algs[j]);

	return err;
}
EXPORT_SYMBOL_GPL(asr_te200_sha_register);

int asr_te200_sha_unregister(struct asr_te200_dev *te200_dd)
{
	int i;
	struct asr_te200_sha *sha_dd = &te200_dd->asr_sha;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
		crypto_unregister_ahash(&sha_algs[i]);

	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);

	return 0;
}
EXPORT_SYMBOL_GPL(asr_te200_sha_unregister);

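/*
 * Self-test: three SHA-1 sessions are driven interleaved on purpose, to
 * exercise the context save/restore path in hash_op_init/proc/finish.
 */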
#ifdef ASR_TE200_SHA_TEST
static int te200_sha_test(struct asr_te200_sha *dd)
{
	int ret = 0;

	const struct {
		const char *msg;
		uint8_t hash[20];
	} sha1_tests[] = {
		{
			"abc",
			{ 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06,
			  0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
			  0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
			  0xd8, 0x9d
			}
		},
		{
			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
			"jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
			"nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
			"gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
			"jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
			"oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
			"djkisijdknknkskdnknflnnesniewinoinknmdn"
			"kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
			"lskldklklklnmlflmlmlfmlfml",
			{
			  0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
			  0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
			  0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
			  0x13, 0x91
			}
		},
		{
			"asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqdsjklfdfjlkdfkfs"
			"fkjlfskjdflioherfjjfdjkfdnkfdfdojjodfjdfjflj;sljjlfkkl"
			"nfnkgbhhoigfhigfopojpfjojpoffkjlfskjdflioherfjjfdjkfdn"
			"kfdfdojjodfjdfjfljnfnkgbhhoigfhigfoponfnkgbhhoigfhigfo"
			"pojpfjoewiroiowiodsdjkisijdknknkskdnknflnnesniewinoinkn"
			"mdnkknknsdnjjfsnnkfnkknslnklknfnknkflksnlklskldklklklnm"
			"lflmlmlfmlfml",
			{
			  0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
			  0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
			  0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
			  0x13, 0x91
			}
		}
	};

	struct asr_sha_reqctx ctx1;
	struct asr_sha_reqctx ctx2;
	struct asr_sha_reqctx ctx3;

	unsigned char out_sha1_1[20] = {0};
	unsigned char out_sha1_2[20] = {0};
	unsigned char out_sha1_3[20] = {0};

	ctx1.dd = dd;
	ctx2.dd = dd;
	ctx3.dd = dd;

	/* Interleave the three sessions to force context swaps */
	ret = hash_op_init(&ctx1, HASH_SHA1);
	if (ret)
		return ret;
	ret = hash_op_proc(&ctx1, (uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
	if (ret)
		return ret;
	ret = hash_op_init(&ctx2, HASH_SHA1);
	if (ret)
		return ret;
	ret = hash_op_proc(&ctx2, (uint8_t *)sha1_tests[1].msg, 10);
	if (ret)
		return ret;
	ret = hash_op_finish(&ctx1, out_sha1_1, sizeof(out_sha1_1));
	if (ret)
		return ret;
	ret = hash_op_init(&ctx3, HASH_SHA1);
	if (ret)
		return ret;
	ret = hash_op_proc(&ctx2, (uint8_t *)sha1_tests[1].msg + 10, strlen(sha1_tests[1].msg) - 10);
	if (ret)
		return ret;
	ret = hash_op_proc(&ctx3, (uint8_t *)sha1_tests[2].msg, 23);
	if (ret)
		return ret;
	ret = hash_op_finish(&ctx2, out_sha1_2, sizeof(out_sha1_2));
	if (ret)
		return ret;
	ret = hash_op_proc(&ctx3, (uint8_t *)sha1_tests[2].msg + 23, strlen(sha1_tests[2].msg) - 23);
	if (ret)
		return ret;
	ret = hash_op_finish(&ctx3, out_sha1_3, sizeof(out_sha1_3));
	if (ret)
		return ret;

	if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1)))
		printk("sha1 test 0 failed\n");
	else
		printk("sha1 test 0 pass\n");
	if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2)))
		printk("sha1 test 1 failed\n");
	else
		printk("sha1 test 1 pass\n");
	if (memcmp(out_sha1_3, sha1_tests[2].hash, sizeof(out_sha1_3)))
		printk("sha1 test 2 failed\n");
	else
		printk("sha1 test 2 pass\n");

	return 0;
}
#endif