1#include <linux/module.h>
2#include <linux/slab.h>
3#include <linux/err.h>
4#include <linux/clk-provider.h>
5#include <linux/clk.h>
6#include <linux/io.h>
7#include <linux/hw_random.h>
8#include <linux/platform_device.h>
9#include <linux/scatterlist.h>
10#include <crypto/scatterwalk.h>
11#include <linux/of_device.h>
12#include <linux/mutex.h>
13#include <linux/device.h>
14#include <linux/init.h>
15#include <linux/delay.h>
16#include <crypto/hmac.h>
17#include <crypto/md5.h>
18#include <crypto/sha.h>
19
20#include "asr-bcm.h"
21#include "asr-sha.h"
22
23// #define ASR_BCM_SHA_TEST
24
25static struct asr_bcm_sha *asr_sha_local = NULL;
26
27static inline u32 asr_sha_read(struct asr_bcm_sha *dd, u32 offset)
28{
29 u32 value = readl_relaxed(dd->io_base + offset);
30
31 return value;
32}
33
34static inline void asr_sha_write(struct asr_bcm_sha *dd,
35 u32 offset, u32 value)
36{
37 writel_relaxed(value, dd->io_base + offset);
38}
39
40/* ------- bcm sha hardware operation -------- */
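/*
 * Pulse the soft-reset bit (bit 3) of HASH_CONTROL to return the hash
 * engine to a clean state before a new operation is configured.
 */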
41static void hash_sw_reset(struct asr_bcm_sha *dd)
42{
43 uint32_t val;
44
45 val = (0x1 << 0x3);
46 asr_sha_write(dd, HASH_CONTROL, val);
47 val = 0x0;
48 asr_sha_write(dd, HASH_CONTROL, val);
49
50 return;
51}
52
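/*
 * Select the hash algorithm in the low nibble of HASH_CONFIG; setting
 * bit 3 additionally switches the engine into HMAC mode.
 */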
53static int hash_set_mode(struct asr_bcm_sha *dd, \
54 HASH_MODE_T mode, HASH_ALGO_T algo)
55{
56 uint32_t val;
57
58 val = asr_sha_read(dd, HASH_CONFIG);
59 val &= ~0xf;
60 val |= algo;
61 if (mode == HASH_HMAC)
62 val |= (0x1 << 0x3);
63 asr_sha_write(dd, HASH_CONFIG, val);
64
65 return 0;
66}
67
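/*
 * Start the operation latched in HASH_CONTROL by setting bit 0 of
 * HASH_COMMAND, then poll HASH_STATUS: first wait for the busy bits
 * (mask 0xE) to clear, then for the done bit (bit 0) to assert, each in
 * a bounded 1us-per-iteration poll loop. The status is written back
 * afterwards so the next command can be issued.
 */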
68static int hash_kick(struct asr_bcm_sha *dd)
69{
70 uint32_t val;
71 uint32_t cnt;
72
73 val = asr_sha_read(dd, HASH_COMMAND);
74 val |= (0x1 << 0x0);
75 asr_sha_write(dd, HASH_COMMAND, val);
76
77 cnt = 1;
78 /* wait for command */
79
80 do {
81 val = asr_sha_read(dd, HASH_STATUS);
82 if (cnt == 1000000) {
83 dev_err(dd->dev, "hash kick: timeout waiting for engine idle (%u polls), status 0x%08x\n", cnt, val);
84 return -1;
85 }
86 val &= 0xE;
87 udelay(1);
88 cnt++;
89 } while(val != 0);
90
91 cnt = 1;
92 do {
93 val = asr_sha_read(dd, HASH_STATUS);
94 if (cnt == 1000000) {
95 dev_err(dd->dev, "hash kick: timeout waiting for completion (%u polls), status 0x%08x\n", cnt, val);
96 return -1;
97 }
98 val &= 0x1;
99 udelay(1);
100 cnt++;
101 } while(val == 0);
102
103 /* clear status so next command can be issued */
104 asr_sha_write(dd, HASH_STATUS, val);
105
106 return 0;
107}
108
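/*
 * Program the requested phase (HASH_INIT / HASH_UPDATE / HASH_FINAL)
 * into the two low bits of HASH_CONTROL and kick the engine.
 */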
109static int hash_config_op(struct asr_bcm_sha *dd, HASH_OP_MODE_T op_mode)
110{
111 uint32_t val;
112 int ret = 0;
113
114 if (op_mode < HASH_INIT || op_mode > HASH_FINAL)
115 return -1;
116
117 val = asr_sha_read(dd, HASH_CONTROL);
118 val &= ~(0x3 << 0x0);
119 val |= op_mode;
120 asr_sha_write(dd, HASH_CONTROL, val);
121
122 ret = hash_kick(dd);
123 return ret;
124}
125
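/*
 * Capture the intermediate digest registers into the request context so
 * the engine can be released between updates. For SHA-384/512 each state
 * word is 64 bits wide and is split across HASH_DIGEST and HASH_DIGEST_H.
 */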
126static int hash_save_context(struct asr_sha_reqctx *ctx, int alg)
127{
128 int i;
129 struct hash_state *md = &ctx->md;
130 struct asr_bcm_sha *dd = ctx->dd;
131 switch(alg) {
132 case HASH_SHA384:
133 case HASH_SHA512:
134 for (i = 0; i < 8; i++) {
135 md->sha512.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
136 md->sha512.state[i+8] = asr_sha_read(dd, HASH_DIGEST_H(i));
137 }
138 break;
139 case HASH_SHA256:
140 case HASH_SHA224:
141 for (i = 0; i < 8; i++) {
142 md->sha256.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
143 }
144 break;
145 case HASH_SHA1:
146 for (i = 0; i < 5; i++) {
147 md->sha1.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
148 }
149 break;
150 case HASH_MD5:
151 for (i = 0; i < 4; i++) {
152 md->md5.state[i] = asr_sha_read(dd, HASH_DIGEST(i));
153 }
154 break;
155 default:
156 dev_err(dd->dev, "hash save context: invalid alg!\n");
157 return -1;
158 }
159 return 0;
160}
161
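/*
 * Reload a previously saved intermediate digest into the hardware before
 * the next compression round.
 */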
162static int hash_restore_context(struct asr_sha_reqctx *ctx, int alg)
163{
164 int i;
165 struct hash_state *md = &ctx->md;
166 struct asr_bcm_sha *dd = ctx->dd;
167
168 switch(alg) {
169 case HASH_SHA384:
170 case HASH_SHA512:
171 for (i = 0; i < 8; i++) {
172 asr_sha_write(dd, HASH_DIGEST(i), md->sha512.state[i]);
173 asr_sha_write(dd, HASH_DIGEST_H(i), md->sha512.state[i+8]);
174 }
175 break;
176 case HASH_SHA256:
177 case HASH_SHA224:
178 for (i = 0; i < 8; i++) {
179 asr_sha_write(dd, HASH_DIGEST(i), md->sha256.state[i]);
180 }
181 break;
182 case HASH_SHA1:
183 for (i = 0; i < 5; i++) {
184 asr_sha_write(dd, HASH_DIGEST(i), md->sha1.state[i]);
185 }
186 break;
187 case HASH_MD5:
188 for (i = 0; i < 4; i++) {
189 asr_sha_write(dd, HASH_DIGEST(i), md->md5.state[i]);
190 }
191 break;
192 default:
193 dev_err(dd->dev, "hash restore context: invalid alg!\n");
194 return -1;
195 }
196
197 return 0;
198}
199
200static inline void sha_cache_operation(void *addr, int size)
201{
202 __cpuc_flush_dcache_area(addr, size);
203}
204
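/*
 * Push one word-aligned chunk through the BCM input DMA: reset the hash
 * and DMA engines, restore the saved digest state, flush the source
 * buffer from the D-cache, program the segment size, run a HASH_UPDATE
 * and save the digest state again. The buffer must be word aligned and
 * the length non-zero.
 */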
205static int hash_compress_aligned(struct asr_sha_reqctx *ctx, int alg, uint8_t *in, int data_len)
206{
207 int ret = 0;
208 struct asr_bcm_sha *dd = ctx->dd;
209 struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_sha);
210 struct asr_bcm_ops *bcm_ops = dev_dd->bcm_ops;
211
212 if (((uint32_t)in & 0x3) || (data_len == 0))
213 return -1;
214
215 bcm_ops->dev_get(dev_dd);
216
217 adec_engine_hw_reset(dev_dd, ACC_ENG_HASH);
218 hash_sw_reset(dd);
219 ret = hash_set_mode(dd, HASH_SIMPLE, alg);
220 if (ret)
221 goto error;
222
223 adec_engine_hw_reset(dev_dd, ACC_ENG_DMA);
224 abus_set_mode(dev_dd, ABUS_GRP_A_HASH, ABUS_GRP_B_AES, ABUS_CROSS, ABUS_STRAIGHT);
225 dma_input_config(dev_dd, 0, 0);
226 ret = hash_restore_context(ctx, alg);
227 if (ret)
228 goto error;
229
230 ret = dma_input_address(dev_dd, (uint32_t)virt_to_phys((void *)in), \
231 ROUND_UP_TO_WORD_CNT(data_len), 0);
232 if (ret)
233 goto error;
234
235 sha_cache_operation(in, (ROUND_UP_TO_WORD_CNT(data_len) << 2));
236 dma_input_start(dev_dd);
237 asr_sha_write(dd, HASH_INCOME_SEG_SZ, data_len);
238 ret = hash_config_op(dd, HASH_UPDATE);
239 if (ret) {
240 dma_input_stop(dev_dd);
241 goto error;
242 }
243
244 dma_wait_input_finish(dev_dd);
245 dma_input_stop(dev_dd);
246
247 ret = hash_save_context(ctx, alg);
248 if (ret)
249 goto error;
250
251error:
252 bcm_ops->dev_put(dev_dd);
253 return ret;
254}
255
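/*
 * Compress whole blocks. Word-aligned input is handed to the DMA
 * directly; unaligned input is copied through a temporary kmalloc()
 * bounce buffer of at most HASH_ALIGN_BUF_SIZE bytes per pass.
 */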
256static int hash_compress(struct asr_sha_reqctx *ctx, int alg, uint8_t *in, int blks, int blk_sz)
257{
258 uint8_t *dma_in = NULL;
259 int data_len = blks * blk_sz;
260 int ret, n;
261 uint8_t *ptr_in;
262
263 if (((uint32_t)in & 0x3) == 0) {
264 dma_in = in;
265 ret = hash_compress_aligned(ctx, alg, dma_in, data_len);
266 return ret;
267 }
268
269 n = min(data_len, HASH_ALIGN_BUF_SIZE);
270 dma_in = (uint8_t *)kmalloc((n + 0x10), GFP_KERNEL);
271 if (!dma_in) {
272 ret = -1;
273 goto exit;
274 }
275 /* kmalloc() memory is already word aligned; re-aligning the pointer here would also break the later kfree() */
276
277 ptr_in = in;
278 do {
279 n = min(data_len, HASH_ALIGN_BUF_SIZE);
280 memcpy((void *)dma_in, (void *)ptr_in, n);
281 ret = hash_compress_aligned(ctx, alg, dma_in, n);
282 if (ret) {
283 goto exit;
284 }
285 data_len -= n;
286 ptr_in +=n;
287 } while(data_len > 0);
288
289exit:
290 if (dma_in)
291 kfree(dma_in);
292 return ret;
293}
294
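/*
 * Final round: feed the buffered tail (if any) through DMA, program the
 * total message size, set bit 2 of HASH_CONTROL and run HASH_FINAL, then
 * read the digest out of the result registers. For SHA-384/512 the high
 * word (HASH_DIGEST_H) supplies the first four bytes of each 8-byte group.
 */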
295static int hash_tail_process(struct asr_sha_reqctx *ctx, uint8_t *out, int out_size, \
296 uint64_t total_size, int tail_size, unsigned char *dma_addr, int alg)
297{
298 int ret = 0;
299 int reg_val, i;
300 struct asr_bcm_sha *dd = ctx->dd;
301 struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_sha);
302 struct asr_bcm_ops *bcm_ops = dev_dd->bcm_ops;
303
304 bcm_ops->dev_get(dev_dd);
305
306 adec_engine_hw_reset(dev_dd, ACC_ENG_HASH);
307 hash_sw_reset(dd);
308 ret = hash_set_mode(dd, HASH_SIMPLE, alg);
309 if (ret)
310 goto error;
311
312 adec_engine_hw_reset(dev_dd, ACC_ENG_DMA);
313 abus_set_mode(dev_dd, ABUS_GRP_A_HASH, ABUS_GRP_B_AES, ABUS_CROSS, ABUS_STRAIGHT);
314 dma_input_config(dev_dd, 0, 0);
315 ret = hash_restore_context(ctx, alg);
316 if (ret)
317 goto error;
318
319 ret = dma_input_address(dev_dd, (uint32_t)virt_to_phys((void *)dma_addr), \
320 ROUND_UP_TO_WORD_CNT(tail_size), 0);
321 if (ret)
322 goto error;
323
324 if (tail_size) {
325 sha_cache_operation(dma_addr, (ROUND_UP_TO_WORD_CNT(tail_size) << 2));
326 dma_input_start(dev_dd);
327 }
328
329 asr_sha_write(dd, HASH_INCOME_SEG_SZ, tail_size);
330 asr_sha_write(dd, HASH_TOTAL_MSG_SZ_L, (total_size & 0xffffffff));
331 asr_sha_write(dd, HASH_TOTAL_MSG_SZ_H, (total_size >> 32));
332
333 reg_val = asr_sha_read(dd, HASH_CONTROL);
334 reg_val |= (0x1 << 0x2);
335 asr_sha_write(dd, HASH_CONTROL, reg_val);
336
337 ret = hash_config_op(dd, HASH_FINAL);
338 if (ret) {
339 if (tail_size)
340 dma_input_stop(dev_dd);
341 goto error;
342 }
343
344 if (tail_size) {
345 dma_wait_input_finish(dev_dd);
346 dma_input_stop(dev_dd);
347 }
348
349 /* copy digest out */
350 if (alg == HASH_SHA384 || alg == HASH_SHA512) {
351 for (i = 0; i < (out_size / 8); i++) {
352 reg_val = asr_sha_read(dd, HASH_DIGEST(i));
353 out[4 + i * 8] = (uint8_t)(reg_val & 0xFF);
354 out[5 + i * 8] = (uint8_t)((reg_val >> 8) & 0xFF);
355 out[6 + i * 8] = (uint8_t)((reg_val >> 16) & 0xFF);
356 out[7 + i * 8] = (uint8_t)((reg_val >> 24) & 0xFF);
357 reg_val = asr_sha_read(dd, HASH_DIGEST_H(i));
358 out[0 + i * 8] = (uint8_t)(reg_val & 0xFF);
359 out[1 + i * 8] = (uint8_t)((reg_val >> 8) & 0xFF);
360 out[2 + i * 8] = (uint8_t)((reg_val >> 16) & 0xFF);
361 out[3 + i * 8] = (uint8_t)((reg_val >> 24) & 0xFF);
362 }
363 } else {
364 for (i = 0; i < (out_size / 4); i++) {
365 reg_val = asr_sha_read(dd, HASH_DIGEST(i));
366 out[0 + i * 4] = (uint8_t)(reg_val & 0xFF);
367 out[1 + i * 4] = (uint8_t)((reg_val >> 8) & 0xFF);
368 out[2 + i * 4] = (uint8_t)((reg_val >> 16) & 0xFF);
369 out[3 + i * 4] = (uint8_t)((reg_val >> 24) & 0xFF);
370 }
371 }
372
373error:
374 bcm_ops->dev_put(dev_dd);
375 return ret;
376}
377
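/*
 * Start a fresh hash: reset the engine, select the algorithm, run
 * HASH_INIT and capture the initial chaining state into the context.
 */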
378static int hash_init(struct asr_sha_reqctx *ctx, int alg)
379{
380 int ret;
381 struct asr_bcm_sha *dd = ctx->dd;
382 struct asr_bcm_dev *dev_dd = container_of(dd, struct asr_bcm_dev, asr_sha);
383 struct asr_bcm_ops *bcm_ops = dev_dd->bcm_ops;
384
385 bcm_ops->dev_get(dev_dd);
386
387 adec_engine_hw_reset(dev_dd, ACC_ENG_HASH);
388 hash_sw_reset(dd);
389
390 ret = hash_set_mode(dd, HASH_SIMPLE, alg);
391 if (ret)
392 goto error;
393 ret = hash_config_op(dd, HASH_INIT);
394 if (ret)
395 goto error;
396
397 ret = hash_save_context(ctx, alg);
398 if (ret)
399 goto error;
400
401error:
402 bcm_ops->dev_put(dev_dd);
403 return ret;
404}
405
406/* Only whole, block-aligned chunks are handed to the hardware at a time; partial data is buffered in md->buf */
407static int hash_process(struct asr_sha_reqctx *ctx, int alg, uint8_t *in, uint32_t inlen)
408{
409 int err;
410 uint32_t n, blocks;
411 struct hash_state *md = &ctx->md;
412
413 if (md->curlen > sizeof(md->buf)) {
414 return -1;
415 }
416
417 while (inlen > 0) {
418 if (md->curlen == 0 && inlen >= md->block_size) {
419 blocks = inlen / md->block_size;
420 err = hash_compress(ctx, alg, in, blocks, md->block_size);
421 if (err)
422 return err;
423 md->length += blocks * md->block_size * 8;
424 in += blocks * md->block_size;
425 inlen -= blocks * md->block_size;
426 } else {
427 n = min(inlen, (md->block_size - md->curlen));
428 memcpy(md->buf + md->curlen, in, n);
429 md->curlen += n;
430 in += n;
431 inlen -= n;
432 if (md->curlen == md->block_size) {
433 err = hash_compress(ctx, alg, md->buf, 1, md->block_size);
434 if (err)
435 return err;
436 md->length += 8*md->block_size;
437 md->curlen = 0;
438 }
439 }
440 }
441
442 return 0;
443}
444
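/*
 * Finish the hash: map the algorithm to its digest length and run the
 * final round over whatever is still buffered in md->buf. md->length
 * counts the bits already compressed, md->curlen the buffered tail bytes.
 */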
445static int hash_done(struct asr_sha_reqctx *ctx, int alg, uint8_t *out)
446{
447 uint32_t out_len;
448 struct hash_state *md = &ctx->md;
449 struct asr_bcm_sha *dd = ctx->dd;
450
451 switch(alg) {
452 case HASH_SHA512:
453 out_len = HASH_LEN_SHA512;
454 break;
455 case HASH_SHA384:
456 out_len = HASH_LEN_SHA384;
457 break;
458 case HASH_SHA256:
459 out_len = HASH_LEN_SHA256;
460 break;
461 case HASH_SHA224:
462 out_len = HASH_LEN_SHA224;
463 break;
464 case HASH_SHA1:
465 out_len = HASH_LEN_SHA1;
466 break;
467 case HASH_MD5:
468 out_len = HASH_LEN_MD5;
469 break;
470 default:
471 dev_err(dd->dev, "err: not support hash alg\n");
472 return -1;
473 }
474
475 return hash_tail_process(ctx, out, out_len, \
476 (md->length / 8 + md->curlen), md->curlen, md->buf, alg);
477}
478/* ------- end -------- */
479
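/*
 * Copy request data from the scatterlist into the linear bounce buffer,
 * advancing ctx->offset/ctx->total and skipping zero-length entries.
 */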
480static size_t asr_sha_append_sg(struct asr_sha_reqctx *ctx)
481{
482 size_t count;
483
484 while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
485 count = min(ctx->sg->length - ctx->offset, ctx->total);
486 count = min(count, ctx->buflen - ctx->bufcnt);
487
488 if (count <= 0) {
489 /*
490 * Check if count <= 0 because the buffer is full or
491 * because the sg length is 0. In the latter case,
492 * check if there is another sg in the list, a 0 length
493 * sg doesn't necessarily mean the end of the sg list.
494 */
495 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
496 ctx->sg = sg_next(ctx->sg);
497 continue;
498 } else {
499 break;
500 }
501 }
502
503 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
504 ctx->offset, count, 0);
505
506 ctx->bufcnt += count;
507 ctx->offset += count;
508 ctx->total -= count;
509
510 if (ctx->offset == ctx->sg->length) {
511 ctx->sg = sg_next(ctx->sg);
512 if (ctx->sg)
513 ctx->offset = 0;
514 else
515 ctx->total = 0;
516 }
517 }
518
519 return 0;
520}
521
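/*
 * Standard crypto queue handling: enqueue the request and, if the engine
 * is idle, dequeue the next one (notifying any backlogged request with
 * -EINPROGRESS) and hand it to ctx->start().
 */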
522static int asr_sha_handle_queue(struct asr_bcm_sha *dd,
523 struct ahash_request *req)
524{
525 struct crypto_async_request *async_req, *backlog;
526 struct asr_sha_ctx *ctx;
527 unsigned long flags;
528 bool start_async;
529 int err = 0, ret = 0;
530
531 spin_lock_irqsave(&dd->lock, flags);
532 if (req)
533 ret = ahash_enqueue_request(&dd->queue, req);
534
535 if (SHA_FLAGS_BUSY & dd->flags) {
536 spin_unlock_irqrestore(&dd->lock, flags);
537 return ret;
538 }
539
540 backlog = crypto_get_backlog(&dd->queue);
541 async_req = crypto_dequeue_request(&dd->queue);
542 if (async_req)
543 dd->flags |= SHA_FLAGS_BUSY;
544
545 spin_unlock_irqrestore(&dd->lock, flags);
546
547 if (!async_req) {
548 return ret;
549 }
550
551 if (backlog)
552 backlog->complete(backlog, -EINPROGRESS);
553
554 ctx = crypto_tfm_ctx(async_req->tfm);
555
556 dd->req = ahash_request_cast(async_req);
557 start_async = (dd->req != req);
558 dd->is_async = start_async;
559 dd->force_complete = false;
560
561 /* WARNING: ctx->start() MAY change dd->is_async. */
562 err = ctx->start(dd);
563 return (start_async) ? ret : err;
564}
565
566static int asr_sha_enqueue(struct ahash_request *req, unsigned int op)
567{
568 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
569 struct asr_bcm_sha *dd = ctx->dd;
570
571 ctx->op = op;
572
573 return asr_sha_handle_queue(dd, req);
574}
575
576static void asr_sha_copy_ready_hash(struct ahash_request *req)
577{
578 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
579
580 if (!req->result)
581 return;
582
583 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
584 case SHA_FLAGS_MD5:
585 memcpy(req->result, ctx->digest, MD5_DIGEST_SIZE);
586 break;
587 case SHA_FLAGS_SHA1:
588 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
589 break;
590 case SHA_FLAGS_SHA224:
591 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
592 break;
593 case SHA_FLAGS_SHA256:
594 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
595 break;
596 case SHA_FLAGS_SHA384:
597 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
598 break;
599 case SHA_FLAGS_SHA512:
600 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
601 break;
602 default:
603 return;
604 }
605}
606
607static inline int asr_sha_complete(struct asr_bcm_sha *dd, int err)
608{
609 struct ahash_request *req = dd->req;
610 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
611
612 dd->flags &= ~(SHA_FLAGS_BUSY);
613 ctx->flags &= ~(SHA_FLAGS_FINAL);
614
615 if ((dd->is_async || dd->force_complete) && req->base.complete)
616 req->base.complete(&req->base, err);
617
618 /* handle new request */
619 tasklet_schedule(&dd->queue_task);
620
621 return err;
622}
623
624static int asr_sha_buff_init(struct asr_bcm_sha *dd, uint32_t len)
625{
626 struct ahash_request *req = dd->req;
627 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
628
629 ctx->buffer = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
630 if (!ctx->buffer) {
631 dev_err(dd->dev, "unable to alloc pages.\n");
632 return -ENOMEM;
633 }
634
635 ctx->buflen = PAGE_SIZE << get_order(len);
636
637 return 0;
638}
639
640static void asr_sha_buff_cleanup(struct asr_bcm_sha *dd, uint32_t len)
641{
642 struct ahash_request *req = dd->req;
643 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
644
645 free_pages((unsigned long)ctx->buffer, get_order(len));
646 ctx->buflen = 0;
647}
648
649static int sha_init_req(struct asr_sha_reqctx *ctx)
650{
651 int ret = 0;
652
653 /* hardware: hash init */
654 ret = hash_init(ctx, ctx->md.alg);
655 if (ret)
656 return -EINVAL;
657 return 0;
658}
659
660static int sha_update_req(struct asr_sha_reqctx *ctx)
661{
662 int ret = 0;
663 int bufcnt;
664 uint32_t buflen = ctx->total;
665
666 ret = asr_sha_buff_init(ctx->dd, ctx->total);
667 if (ret)
668 return -ENOMEM;
669
670 asr_sha_append_sg(ctx);
671 bufcnt = ctx->bufcnt;
672 ctx->bufcnt = 0;
673
674 /* hardware: hash process */
675 ret = hash_process(ctx, ctx->md.alg, ctx->buffer, bufcnt);
676 if (ret)
677 ret = -EINVAL;
678
679 asr_sha_buff_cleanup(ctx->dd, buflen);
680 return ret;
681}
682
683static void sha_finish_req(struct asr_sha_reqctx *ctx, int *err)
684{
685 uint8_t *hash = (uint8_t *)ctx->digest;
686
687 if (!(*err) && (ctx->flags & SHA_FLAGS_FINAL)) {
688 *err = hash_done(ctx, ctx->md.alg, (uint8_t *)hash);
689 asr_sha_copy_ready_hash(ctx->dd->req);
690 ctx->flags &= (~SHA_FLAGS_FINAL);
691 } else {
692 ctx->flags |= SHA_FLAGS_ERROR;
693 }
694}
695
696static void sha_next_req(struct asr_sha_reqctx *ctx, int *err)
697{
698 if (likely(!(*err) && (SHA_FLAGS_FINAL & ctx->flags)))
699 sha_finish_req(ctx, err);
700
701 (void)asr_sha_complete(ctx->dd, *err);
702}
703
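/*
 * Per-request state machine entered from the queue: run the deferred
 * hardware init on first use, then either buffer-and-compress data for
 * SHA_OP_UPDATE (finishing immediately in the finup case) or produce the
 * digest for SHA_OP_FINAL, and finally complete the request.
 */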
704static int asr_sha_start(struct asr_bcm_sha *dd)
705{
706 int err = 0;
707 struct ahash_request *req = dd->req;
708 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
709
710 mutex_lock(&dd->queue_lock);
711
712 if ((ctx->flags & SHA_FLAGS_INIT)) {
713 err = sha_init_req(ctx);
714 ctx->flags &= (~SHA_FLAGS_INIT);
715 if (err) {
716 mutex_unlock(&dd->queue_lock);
717 return err;
718 }
719 }
720
721 if (ctx->op == SHA_OP_UPDATE) {
722 err = sha_update_req(ctx);
723 if (!err && (ctx->flags & SHA_FLAGS_FINUP))
724 /* no final() after finup() */
725 sha_finish_req(ctx, &err);
726 } else if (ctx->op == SHA_OP_FINAL) {
727 sha_finish_req(ctx, &err);
728 }
729
730 if (unlikely(err != -EINPROGRESS)) {
731 /* Task will not finish it, so do it here */
732 sha_next_req(ctx, &err);
733 }
734
735 mutex_unlock(&dd->queue_lock);
736 return err;
737}
738
739static int asr_sha_cra_init(struct crypto_tfm *tfm)
740{
741 struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
742 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
743 sizeof(struct asr_sha_reqctx));
744 ctx->start = asr_sha_start;
745
746 return 0;
747}
748
749static void asr_sha_cra_exit(struct crypto_tfm *tfm)
750{
751 struct asr_sha_ctx *ctx = crypto_tfm_ctx(tfm);
752 memset(ctx, 0, sizeof(*ctx));
753}
754
755static inline void asr_sha_get(struct asr_bcm_sha *dd)
756{
757 mutex_lock(&dd->sha_lock);
758}
759
760static inline void asr_sha_put(struct asr_bcm_sha *dd)
761{
762 if (mutex_is_locked(&dd->sha_lock))
763 mutex_unlock(&dd->sha_lock);
764}
765
766static int asr_sha_init(struct ahash_request *req)
767{
768 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
769 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
770 struct asr_bcm_sha *dd = asr_sha_local;
771
772 asr_sha_get(dd);
773
774 ctx->dd = dd;
775 memset(&ctx->md, 0, sizeof(ctx->md));
776 ctx->flags = 0;
777
778 switch (crypto_ahash_digestsize(tfm)) {
779 case MD5_DIGEST_SIZE:
780 ctx->flags |= SHA_FLAGS_MD5;
781 ctx->md.alg = HASH_MD5;
782 ctx->md.block_size = MD5_HMAC_BLOCK_SIZE;
783 break;
784 case SHA1_DIGEST_SIZE:
785 ctx->flags |= SHA_FLAGS_SHA1;
786 ctx->md.alg = HASH_SHA1;
787 ctx->md.block_size = SHA1_BLOCK_SIZE;
788 break;
789 case SHA224_DIGEST_SIZE:
790 ctx->flags |= SHA_FLAGS_SHA224;
791 ctx->md.alg = HASH_SHA224;
792 ctx->md.block_size = SHA224_BLOCK_SIZE;
793 break;
794 case SHA256_DIGEST_SIZE:
795 ctx->flags |= SHA_FLAGS_SHA256;
796 ctx->md.alg = HASH_SHA256;
797 ctx->md.block_size = SHA256_BLOCK_SIZE;
798 break;
799 case SHA384_DIGEST_SIZE:
800 ctx->flags |= SHA_FLAGS_SHA384;
801 ctx->md.alg = HASH_SHA384;
802 ctx->md.block_size = SHA384_BLOCK_SIZE;
803 break;
804 case SHA512_DIGEST_SIZE:
805 ctx->flags |= SHA_FLAGS_SHA512;
806 ctx->md.alg = HASH_SHA512;
807 ctx->md.block_size = SHA512_BLOCK_SIZE;
808 break;
809 default:
810 asr_sha_put(dd);
811 return -EINVAL;
812 }
813
814 ctx->bufcnt = 0;
815 ctx->flags |= SHA_FLAGS_INIT;
816
817 asr_sha_put(dd);
818 return 0;
819}
820
821static int asr_sha_update(struct ahash_request *req)
822{
823 int ret = 0;
824 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
825
826 asr_sha_get(ctx->dd);
827
828 ctx->total = req->nbytes;
829 ctx->sg = req->src;
830 ctx->offset = 0;
831
832 ret = asr_sha_enqueue(req, SHA_OP_UPDATE);
833
834 asr_sha_put(ctx->dd);
835 return ret;
836}
837
838static int asr_sha_final(struct ahash_request *req)
839{
840 int ret = 0;
841 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
842
843 asr_sha_get(ctx->dd);
844
845 ctx->flags |= SHA_FLAGS_FINAL;
846 if (ctx->flags & SHA_FLAGS_ERROR) {
847 asr_sha_put(ctx->dd);
848 return 0; /* uncompleted hash is not needed */
849 }
850 ret = asr_sha_enqueue(req, SHA_OP_FINAL);
851
852 asr_sha_put(ctx->dd);
853 return ret;
854}
855
856static int asr_sha_finup(struct ahash_request *req)
857{
858 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
859 int err1, err2;
860
861 ctx->flags |= SHA_FLAGS_FINUP;
862
863 err1 = asr_sha_update(req);
864 if (err1 == -EINPROGRESS ||
865 (err1 == -EBUSY && (ahash_request_flags(req) &
866 CRYPTO_TFM_REQ_MAY_BACKLOG))) {
867 asr_sha_put(ctx->dd);
868 return err1;
869 }
870 /*
871 * final() always has to be called to clean up resources,
872 * even if update() failed, except for -EINPROGRESS
873 */
874 err2 = asr_sha_final(req);
875
876 return err1 ?: err2;
877}
878
879static int asr_sha_digest(struct ahash_request *req)
880{
881 return asr_sha_init(req) ?: asr_sha_finup(req);
882}
883
884static int asr_sha_export(struct ahash_request *req, void *out)
885{
886 const struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
887
888 memcpy(out, ctx, sizeof(*ctx));
889 return 0;
890}
891
892static int asr_sha_import(struct ahash_request *req, const void *in)
893{
894 struct asr_sha_reqctx *ctx = ahash_request_ctx(req);
895
896 memcpy(ctx, in, sizeof(*ctx));
897 return 0;
898}
899
900static struct ahash_alg sha_algs[] = {
901 /* md5 */
902 {
903 .init = asr_sha_init,
904 .update = asr_sha_update,
905 .final = asr_sha_final,
906 .finup = asr_sha_finup,
907 .digest = asr_sha_digest,
908 .export = asr_sha_export,
909 .import = asr_sha_import,
910 .halg = {
911 .digestsize = MD5_DIGEST_SIZE,
912 .statesize = sizeof(struct asr_sha_reqctx),
913 .base = {
914 .cra_name = "md5",
915 .cra_driver_name = "asr-md5",
916 .cra_priority = ASR_SHA_PRIORITY,
917 .cra_flags = CRYPTO_ALG_ASYNC,
918 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
919 .cra_ctxsize = sizeof(struct asr_sha_ctx),
920 .cra_alignmask = 0,
921 .cra_module = THIS_MODULE,
922 .cra_init = asr_sha_cra_init,
923 .cra_exit = asr_sha_cra_exit,
924 }
925 }
926 },
927
928 /* sha1 */
929 {
930 .init = asr_sha_init,
931 .update = asr_sha_update,
932 .final = asr_sha_final,
933 .finup = asr_sha_finup,
934 .digest = asr_sha_digest,
935 .export = asr_sha_export,
936 .import = asr_sha_import,
937 .halg = {
938 .digestsize = SHA1_DIGEST_SIZE,
939 .statesize = sizeof(struct asr_sha_reqctx),
940 .base = {
941 .cra_name = "sha1",
942 .cra_driver_name = "asr-sha1",
943 .cra_priority = ASR_SHA_PRIORITY,
944 .cra_flags = CRYPTO_ALG_ASYNC,
945 .cra_blocksize = SHA1_BLOCK_SIZE,
946 .cra_ctxsize = sizeof(struct asr_sha_ctx),
947 .cra_alignmask = 0,
948 .cra_module = THIS_MODULE,
949 .cra_init = asr_sha_cra_init,
950 .cra_exit = asr_sha_cra_exit,
951 }
952 }
953 },
954
955 /* sha224 */
956 {
957 .init = asr_sha_init,
958 .update = asr_sha_update,
959 .final = asr_sha_final,
960 .finup = asr_sha_finup,
961 .digest = asr_sha_digest,
962 .export = asr_sha_export,
963 .import = asr_sha_import,
964 .halg = {
965 .digestsize = SHA224_DIGEST_SIZE,
966 .statesize = sizeof(struct asr_sha_reqctx),
967 .base = {
968 .cra_name = "sha224",
969 .cra_driver_name = "asr-sha224",
970 .cra_priority = ASR_SHA_PRIORITY,
971 .cra_flags = CRYPTO_ALG_ASYNC,
972 .cra_blocksize = SHA224_BLOCK_SIZE,
973 .cra_ctxsize = sizeof(struct asr_sha_ctx),
974 .cra_alignmask = 0,
975 .cra_module = THIS_MODULE,
976 .cra_init = asr_sha_cra_init,
977 .cra_exit = asr_sha_cra_exit,
978 }
979 }
980 },
981
982 /* sha256 */
983 {
984 .init = asr_sha_init,
985 .update = asr_sha_update,
986 .final = asr_sha_final,
987 .finup = asr_sha_finup,
988 .digest = asr_sha_digest,
989 .export = asr_sha_export,
990 .import = asr_sha_import,
991 .halg = {
992 .digestsize = SHA256_DIGEST_SIZE,
993 .statesize = sizeof(struct asr_sha_reqctx),
994 .base = {
995 .cra_name = "sha256",
996 .cra_driver_name = "asr-sha256",
997 .cra_priority = ASR_SHA_PRIORITY,
998 .cra_flags = CRYPTO_ALG_ASYNC,
999 .cra_blocksize = SHA256_BLOCK_SIZE,
1000 .cra_ctxsize = sizeof(struct asr_sha_ctx),
1001 .cra_alignmask = 0,
1002 .cra_module = THIS_MODULE,
1003 .cra_init = asr_sha_cra_init,
1004 .cra_exit = asr_sha_cra_exit,
1005 }
1006 }
1007 },
1008
1009 /* sha384 */
1010 {
1011 .init = asr_sha_init,
1012 .update = asr_sha_update,
1013 .final = asr_sha_final,
1014 .finup = asr_sha_finup,
1015 .digest = asr_sha_digest,
1016 .export = asr_sha_export,
1017 .import = asr_sha_import,
1018 .halg = {
1019 .digestsize = SHA384_DIGEST_SIZE,
1020 .statesize = sizeof(struct asr_sha_reqctx),
1021 .base = {
1022 .cra_name = "sha384",
1023 .cra_driver_name = "asr-sha384",
1024 .cra_priority = ASR_SHA_PRIORITY,
1025 .cra_flags = CRYPTO_ALG_ASYNC,
1026 .cra_blocksize = SHA384_BLOCK_SIZE,
1027 .cra_ctxsize = sizeof(struct asr_sha_ctx),
1028 .cra_alignmask = 0,
1029 .cra_module = THIS_MODULE,
1030 .cra_init = asr_sha_cra_init,
1031 .cra_exit = asr_sha_cra_exit,
1032 }
1033 }
1034 },
1035
1036 /* sha512 */
1037 {
1038 .init = asr_sha_init,
1039 .update = asr_sha_update,
1040 .final = asr_sha_final,
1041 .finup = asr_sha_finup,
1042 .digest = asr_sha_digest,
1043 .export = asr_sha_export,
1044 .import = asr_sha_import,
1045 .halg = {
1046 .digestsize = SHA512_DIGEST_SIZE,
1047 .statesize = sizeof(struct asr_sha_reqctx),
1048 .base = {
1049 .cra_name = "sha512",
1050 .cra_driver_name = "asr-sha512",
1051 .cra_priority = ASR_SHA_PRIORITY,
1052 .cra_flags = CRYPTO_ALG_ASYNC,
1053 .cra_blocksize = SHA512_BLOCK_SIZE,
1054 .cra_ctxsize = sizeof(struct asr_sha_ctx),
1055 .cra_alignmask = 0,
1056 .cra_module = THIS_MODULE,
1057 .cra_init = asr_sha_cra_init,
1058 .cra_exit = asr_sha_cra_exit,
1059 }
1060 }
1061 },
1062};
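/*
 * Usage note (illustrative sketch, not part of this driver): once the
 * algorithms above are registered, kernel consumers reach them through the
 * generic ahash API by name ("sha256" etc., driver name "asr-sha256"), and
 * digest() performs init + update + final in one call. The snippet below is
 * an example under assumptions: it presumes a kernel that provides
 * crypto_wait_req()/DECLARE_CRYPTO_WAIT(), and the function name
 * demo_sha256_digest() and its "data"/"out" buffers (linearly mapped kernel
 * memory) are hypothetical.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int demo_sha256_digest(const void *data, unsigned int len, u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int ret;
 *
 *		tfm = crypto_alloc_ahash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			crypto_free_ahash(tfm);
 *			return -ENOMEM;
 *		}
 *
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *		return ret;
 *	}
 */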
1063
1064static void asr_sha_queue_task(unsigned long data)
1065{
1066 struct asr_bcm_sha *dd = (struct asr_bcm_sha *)data;
1067
1068 asr_sha_handle_queue(dd, NULL);
1069}
1070
1071#ifdef ASR_BCM_SHA_TEST
1072 static int bcm_sha_test(struct asr_bcm_sha *dd);
1073#endif
1074
1075int asr_bcm_sha_register(struct asr_bcm_dev *bcm_dd)
1076{
1077 int err, i, j;
1078 struct asr_bcm_sha *sha_dd;
1079
1080 sha_dd = &bcm_dd->asr_sha;
1081
1082 sha_dd->dev = bcm_dd->dev;
1083 sha_dd->io_base = bcm_dd->io_base;
1084 sha_dd->phys_base = bcm_dd->phys_base;
1085
1086 asr_sha_local = sha_dd;
1087
1088 spin_lock_init(&sha_dd->lock);
1089 mutex_init(&sha_dd->sha_lock);
1090 mutex_init(&sha_dd->queue_lock);
1091 tasklet_init(&sha_dd->queue_task, asr_sha_queue_task,
1092 (unsigned long)sha_dd);
1093 crypto_init_queue(&sha_dd->queue, ASR_SHA_QUEUE_LENGTH);
1094
1095 for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
1096 err = crypto_register_ahash(&sha_algs[i]);
1097 if (err)
1098 goto err_sha_algs;
1099 }
1100
1101#ifdef ASR_BCM_SHA_TEST
1102 bcm_sha_test(sha_dd);
1103#endif
1104
1105 return 0;
1106
1107err_sha_algs:
1108 for (j = 0; j < i; j++)
1109 crypto_unregister_ahash(&sha_algs[j]);
1110
1111 return err;
1112}
1113EXPORT_SYMBOL_GPL(asr_bcm_sha_register);
1114
1115int asr_bcm_sha_unregister(struct asr_bcm_dev *bcm_dd)
1116{
1117 int i;
1118 struct asr_bcm_sha *sha_dd = &bcm_dd->asr_sha;
1119
1120
1121 for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
1122 crypto_unregister_ahash(&sha_algs[i]);
1123
1124 tasklet_kill(&sha_dd->queue_task);
1125
1126 return 0;
1127}
1128EXPORT_SYMBOL_GPL(asr_bcm_sha_unregister);
1129
1130#ifdef ASR_BCM_SHA_TEST
1131
1132static int bcm_sha_test(struct asr_bcm_sha *dd)
1133{
1134 int ret = 0;
1135
1136 const struct {
1137 const char *msg;
1138 uint8_t hash[20];
1139 } sha1_tests[] = {
1140 {
1141 "abc",
1142 { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06,
1143 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
1144 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0,
1145 0xd8, 0x9d
1146 }
1147 },
1148 {
1149 "asjhsdjljfdsdjjkdfwyqeuwouzxkmcxjkmwqds"
1150 "jklfdfjlkdfkfsfkjlfskjdflioherfjjfdjkfd"
1151 "nkfdfdojjodfjdfjflj;sljjlfkklnfnkgbhhoi"
1152 "gfhigfopojpfjojpoffkjlfskjdflioherfjjfd"
1153 "jkfdnkfdfdojjodfjdfjfljnfnkgbhhoigfhigf"
1154 "oponfnkgbhhoigfhigfopojpfjoewiroiowiods"
1155 "djkisijdknknkskdnknflnnesniewinoinknmdn"
1156 "kknknsdnjjfsnnkfnkknslnklknfnknkflksnlk"
1157 "lskldklklklnmlflmlmlfmlfml",
1158 {
1159 0xc4, 0x53, 0xca, 0x24, 0xfa, 0xe5,
1160 0x39, 0x53, 0x08, 0x8c, 0x57, 0x1a,
1161 0x96, 0xe9, 0x64, 0x7f, 0xd5, 0xf9,
1162 0x13, 0x91
1163 }
1164 }
1165 };
1166
1167 struct asr_sha_reqctx ctx1;
1168 struct asr_sha_reqctx ctx2;
1169
1170 unsigned char out_sha1_1[20] = {0};
1171 unsigned char out_sha1_2[20] = {0};
1172
1173 memset(&ctx1.md, 0, sizeof(ctx1.md));
1174 ctx1.md.block_size = BLOCK_ALGIN_SIZE;
1175 ctx1.dd = dd;
1176
1177 memset(&ctx2.md, 0, sizeof(ctx2.md));
1178 ctx2.md.block_size = BLOCK_ALGIN_SIZE;
1179 ctx2.dd = dd;
1180
1181 ret = hash_init(&ctx1, HASH_SHA1);
1182 if (ret) {
1183 return ret;
1184 }
1185 ret = hash_init(&ctx2, HASH_SHA1);
1186 if (ret) {
1187 return ret;
1188 }
1189 ret = hash_process(&ctx1, HASH_SHA1, (uint8_t *)sha1_tests[0].msg, strlen(sha1_tests[0].msg));
1190 if (ret) {
1191 return ret;
1192 }
1193 ret = hash_done(&ctx1, HASH_SHA1, out_sha1_1);
1194 if (ret) {
1195 return ret;
1196 }
1197 ret = hash_process(&ctx2, HASH_SHA1, (uint8_t *)sha1_tests[1].msg, strlen(sha1_tests[1].msg));
1198 if (ret) {
1199 return ret;
1200 }
1201 ret = hash_done(&ctx2, HASH_SHA1, out_sha1_2);
1202 if (ret) {
1203 return ret;
1204 }
1205
1206 if (memcmp(out_sha1_1, sha1_tests[0].hash, sizeof(out_sha1_1))) {
1207 printk("sha1 test 0 failed");
1208 } else {
1209 printk("sha1 test 0 pass");
1210 }
1211 if (memcmp(out_sha1_2, sha1_tests[1].hash, sizeof(out_sha1_2))) {
1212 printk("sha1 test 1 failed");
1213 } else {
1214 printk("sha1 test 1 pass");
1215 }
1216
1217 return 0;
1218}
1219#endif
1220
1221MODULE_LICENSE("GPL");
1222MODULE_AUTHOR("wangyonggan <yongganwang@asrmicro.com>");
1223MODULE_DESCRIPTION("ASR bcm sha driver");