1/*
2 * AMD Cryptographic Coprocessor (CCP) driver
3 *
4 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
5 *
6 * Author: Tom Lendacky <thomas.lendacky@amd.com>
7 * Author: Gary R Hook <gary.hook@amd.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/pci.h>
17#include <linux/interrupt.h>
18#include <crypto/scatterwalk.h>
19#include <crypto/des.h>
20#include <linux/ccp.h>
21
22#include "ccp-dev.h"
23
24/* SHA initial context values */
25static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
26 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
27 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
28 cpu_to_be32(SHA1_H4),
29};
30
31static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
32 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
33 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
34 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
35 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
36};
37
38static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
39 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
40 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
41 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
42 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
43};
44
45static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
46 cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
47 cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
48 cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
49 cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
50};
51
52static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
53 cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
54 cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
55 cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
56 cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
57};
58
59#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
60 ccp_gen_jobid(ccp) : 0)
61
62static u32 ccp_gen_jobid(struct ccp_device *ccp)
63{
64 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
65}
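/* Illustrative note: only version 3 devices tag each command with a fresh
 * job id (see CCP_NEW_JOBID above); later devices simply pass 0.  The
 * counter wraps within CCP_JOBID_MASK, so with a 6-bit mask (an assumption
 * about the mask width) the sequence runs 1, 2, ... 63, 0, 1, ...
 */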
66
67static void ccp_sg_free(struct ccp_sg_workarea *wa)
68{
69 if (wa->dma_count)
70 dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
71
72 wa->dma_count = 0;
73}
74
75static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
76 struct scatterlist *sg, u64 len,
77 enum dma_data_direction dma_dir)
78{
79 memset(wa, 0, sizeof(*wa));
80
81 wa->sg = sg;
82 if (!sg)
83 return 0;
84
85 wa->nents = sg_nents_for_len(sg, len);
86 if (wa->nents < 0)
87 return wa->nents;
88
89 wa->bytes_left = len;
90 wa->sg_used = 0;
91
92 if (len == 0)
93 return 0;
94
95 if (dma_dir == DMA_NONE)
96 return 0;
97
98 wa->dma_sg = sg;
99 wa->dma_sg_head = sg;
100 wa->dma_dev = dev;
101 wa->dma_dir = dma_dir;
102 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
103 if (!wa->dma_count)
104 return -ENOMEM;
105
106 return 0;
107}
108
109static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
110{
111 unsigned int nbytes = min_t(u64, len, wa->bytes_left);
112 unsigned int sg_combined_len = 0;
113
114 if (!wa->sg)
115 return;
116
117 wa->sg_used += nbytes;
118 wa->bytes_left -= nbytes;
119 if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
120 /* Advance to the next DMA scatterlist entry */
121 wa->dma_sg = sg_next(wa->dma_sg);
122
123 /* In the case that the DMA mapped scatterlist has entries
124 * that have been merged, the non-DMA mapped scatterlist
125 * must be advanced multiple times for each merged entry.
126 * This ensures that the current non-DMA mapped entry
127 * corresponds to the current DMA mapped entry.
128 */
129 do {
130 sg_combined_len += wa->sg->length;
131 wa->sg = sg_next(wa->sg);
132 } while (wa->sg_used > sg_combined_len);
133
134 wa->sg_used = 0;
135 }
136}
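/* Worked example of the merged-entry handling above (illustrative): if the
 * caller's scatterlist has three 4 KiB entries that the IOMMU coalesces
 * into a single 12 KiB DMA segment, then once 12 KiB have been consumed
 * dma_sg advances one DMA entry while the do/while loop advances sg three
 * times, keeping the two cursors pointing at corresponding data.
 */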
137
138static void ccp_dm_free(struct ccp_dm_workarea *wa)
139{
140 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
141 if (wa->address)
142 dma_pool_free(wa->dma_pool, wa->address,
143 wa->dma.address);
144 } else {
145 if (wa->dma.address)
146 dma_unmap_single(wa->dev, wa->dma.address, wa->length,
147 wa->dma.dir);
148 kfree(wa->address);
149 }
150
151 wa->address = NULL;
152 wa->dma.address = 0;
153}
154
155static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
156 struct ccp_cmd_queue *cmd_q,
157 unsigned int len,
158 enum dma_data_direction dir)
159{
160 memset(wa, 0, sizeof(*wa));
161
162 if (!len)
163 return 0;
164
165 wa->dev = cmd_q->ccp->dev;
166 wa->length = len;
167
168 if (len <= CCP_DMAPOOL_MAX_SIZE) {
169 wa->dma_pool = cmd_q->dma_pool;
170
171 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
172 &wa->dma.address);
173 if (!wa->address)
174 return -ENOMEM;
175
176 wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
177
178 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
179 } else {
180 wa->address = kzalloc(len, GFP_KERNEL);
181 if (!wa->address)
182 return -ENOMEM;
183
184 wa->dma.address = dma_map_single(wa->dev, wa->address, len,
185 dir);
186 if (dma_mapping_error(wa->dev, wa->dma.address))
187 return -ENOMEM;
188
189 wa->dma.length = len;
190 }
191 wa->dma.dir = dir;
192
193 return 0;
194}
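/* A minimal usage sketch of the DMA work area helpers (illustrative):
 *
 *	struct ccp_dm_workarea tmp;
 *	int ret;
 *
 *	ret = ccp_init_dm_workarea(&tmp, cmd_q, 32, DMA_TO_DEVICE);
 *	if (ret)
 *		return ret;
 *	... stage data in tmp.address, hand tmp.dma.address to the engine ...
 *	ccp_dm_free(&tmp);
 *
 * Requests of CCP_DMAPOOL_MAX_SIZE or less are served from the per-queue
 * DMA pool; anything larger falls back to kzalloc() plus dma_map_single().
 */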
195
196static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
197 struct scatterlist *sg, unsigned int sg_offset,
198 unsigned int len)
199{
200 WARN_ON(!wa->address);
201
202 if (len > (wa->length - wa_offset))
203 return -EINVAL;
204
205 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
206 0);
207 return 0;
208}
209
210static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
211 struct scatterlist *sg, unsigned int sg_offset,
212 unsigned int len)
213{
214 WARN_ON(!wa->address);
215
216 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
217 1);
218}
219
220static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
221 unsigned int wa_offset,
222 struct scatterlist *sg,
223 unsigned int sg_offset,
224 unsigned int len)
225{
226 u8 *p, *q;
227 int rc;
228
229 rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
230 if (rc)
231 return rc;
232
233 p = wa->address + wa_offset;
234 q = p + len - 1;
235 while (p < q) {
236 *p = *p ^ *q;
237 *q = *p ^ *q;
238 *p = *p ^ *q;
239 p++;
240 q--;
241 }
242 return 0;
243}
244
245static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
246 unsigned int wa_offset,
247 struct scatterlist *sg,
248 unsigned int sg_offset,
249 unsigned int len)
250{
251 u8 *p, *q;
252
253 p = wa->address + wa_offset;
254 q = p + len - 1;
255 while (p < q) {
256 *p = *p ^ *q;
257 *q = *p ^ *q;
258 *p = *p ^ *q;
259 p++;
260 q--;
261 }
262
263 ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
264}
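/* Example of the in-place reversal above (illustrative): the bytes
 * 0x01 0x02 0x03 0x04 become 0x04 0x03 0x02 0x01.  The RSA path uses
 * these helpers because callers supply big-endian operands while the
 * engine consumes and produces little-endian values.
 */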
265
266static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
267{
268 ccp_dm_free(&data->dm_wa);
269 ccp_sg_free(&data->sg_wa);
270}
271
272static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
273 struct scatterlist *sg, u64 sg_len,
274 unsigned int dm_len,
275 enum dma_data_direction dir)
276{
277 int ret;
278
279 memset(data, 0, sizeof(*data));
280
281 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
282 dir);
283 if (ret)
284 goto e_err;
285
286 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
287 if (ret)
288 goto e_err;
289
290 return 0;
291
292e_err:
293 ccp_free_data(data, cmd_q);
294
295 return ret;
296}
297
298static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
299{
300 struct ccp_sg_workarea *sg_wa = &data->sg_wa;
301 struct ccp_dm_workarea *dm_wa = &data->dm_wa;
302 unsigned int buf_count, nbytes;
303
304 /* Clear the buffer if setting it */
305 if (!from)
306 memset(dm_wa->address, 0, dm_wa->length);
307
308 if (!sg_wa->sg)
309 return 0;
310
311 /* Perform the copy operation
312 * nbytes will always be <= UINT_MAX because dm_wa->length is
313 * an unsigned int
314 */
315 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
316 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
317 nbytes, from);
318
319 /* Update the structures and generate the count */
320 buf_count = 0;
321 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
322 nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
323 dm_wa->length - buf_count);
324 nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
325
326 buf_count += nbytes;
327 ccp_update_sg_workarea(sg_wa, nbytes);
328 }
329
330 return buf_count;
331}
332
333static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
334{
335 return ccp_queue_buf(data, 0);
336}
337
338static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
339{
340 return ccp_queue_buf(data, 1);
341}
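/* In short (illustrative): ccp_fill_queue_buf() gathers the next chunk of
 * the source scatterlist into the bounce buffer (a final argument of 0 to
 * scatterwalk_map_and_copy() copies sg -> buffer, as in ccp_set_dm_area()),
 * while ccp_empty_queue_buf() scatters the bounce buffer back out to the
 * destination scatterlist (a final argument of 1 copies buffer -> sg, as
 * in ccp_get_dm_area()).
 */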
342
343static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
344 struct ccp_op *op, unsigned int block_size,
345 bool blocksize_op)
346{
347 unsigned int sg_src_len, sg_dst_len, op_len;
348
349 /* The CCP can only DMA from/to one address each per operation. This
350 * requires that we find the smallest DMA area between the source
351 * and destination. The resulting len values will always be <= UINT_MAX
352 * because the dma length is an unsigned int.
353 */
354 sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
355 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
356
357 if (dst) {
358 sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
359 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
360 op_len = min(sg_src_len, sg_dst_len);
361 } else {
362 op_len = sg_src_len;
363 }
364
365	/* The data operation length is the smaller of the sg room remaining
366	 * for the source and the destination, but will always be at least
367	 * block_size in length
368	 */
369 op_len = max(op_len, block_size);
370
371 /* Unless we have to buffer data, there's no reason to wait */
372 op->soc = 0;
373
374 if (sg_src_len < block_size) {
375 /* Not enough data in the sg element, so it
376 * needs to be buffered into a blocksize chunk
377 */
378 int cp_len = ccp_fill_queue_buf(src);
379
380 op->soc = 1;
381 op->src.u.dma.address = src->dm_wa.dma.address;
382 op->src.u.dma.offset = 0;
383 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
384 } else {
385 /* Enough data in the sg element, but we need to
386 * adjust for any previously copied data
387 */
388 op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
389 op->src.u.dma.offset = src->sg_wa.sg_used;
390 op->src.u.dma.length = op_len & ~(block_size - 1);
391
392 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
393 }
394
395 if (dst) {
396 if (sg_dst_len < block_size) {
397 /* Not enough room in the sg element or we're on the
398 * last piece of data (when using padding), so the
399 * output needs to be buffered into a blocksize chunk
400 */
401 op->soc = 1;
402 op->dst.u.dma.address = dst->dm_wa.dma.address;
403 op->dst.u.dma.offset = 0;
404 op->dst.u.dma.length = op->src.u.dma.length;
405 } else {
406 /* Enough room in the sg element, but we need to
407 * adjust for any previously used area
408 */
409 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
410 op->dst.u.dma.offset = dst->sg_wa.sg_used;
411 op->dst.u.dma.length = op->src.u.dma.length;
412 }
413 }
414}
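/* Example of the buffering decision above (illustrative): with an AES
 * block size of 16, a source DMA segment with only 7 bytes remaining is
 * first gathered into src->dm_wa and the operation reads from that bounce
 * buffer (with soc set so the engine completes before the buffer is
 * reused); a segment with 40 bytes remaining is used directly, trimmed to
 * a 32-byte multiple by op_len & ~(block_size - 1).
 */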
415
416static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
417 struct ccp_op *op)
418{
419 op->init = 0;
420
421 if (dst) {
422 if (op->dst.u.dma.address == dst->dm_wa.dma.address)
423 ccp_empty_queue_buf(dst);
424 else
425 ccp_update_sg_workarea(&dst->sg_wa,
426 op->dst.u.dma.length);
427 }
428}
429
430static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
431 struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
432 u32 byte_swap, bool from)
433{
434 struct ccp_op op;
435
436 memset(&op, 0, sizeof(op));
437
438 op.cmd_q = cmd_q;
439 op.jobid = jobid;
440 op.eom = 1;
441
442 if (from) {
443 op.soc = 1;
444 op.src.type = CCP_MEMTYPE_SB;
445 op.src.u.sb = sb;
446 op.dst.type = CCP_MEMTYPE_SYSTEM;
447 op.dst.u.dma.address = wa->dma.address;
448 op.dst.u.dma.length = wa->length;
449 } else {
450 op.src.type = CCP_MEMTYPE_SYSTEM;
451 op.src.u.dma.address = wa->dma.address;
452 op.src.u.dma.length = wa->length;
453 op.dst.type = CCP_MEMTYPE_SB;
454 op.dst.u.sb = sb;
455 }
456
457 op.u.passthru.byte_swap = byte_swap;
458
459 return cmd_q->ccp->vdata->perform->passthru(&op);
460}
461
462static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
463 struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
464 u32 byte_swap)
465{
466 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
467}
468
469static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
470 struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
471 u32 byte_swap)
472{
473 return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
474}
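/* These wrappers move a work area into or out of a 32-byte storage block
 * (LSB) slot via a passthru operation.  A typical use in this file
 * (illustrative) is loading a key that arrived in big-endian order:
 *
 *	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 *			     CCP_PASSTHRU_BYTESWAP_256BIT);
 *
 * where the 256-bit byte swap converts it to the little-endian form the
 * engine expects.
 */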
475
476static noinline_for_stack int
477ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
478{
479 struct ccp_aes_engine *aes = &cmd->u.aes;
480 struct ccp_dm_workarea key, ctx;
481 struct ccp_data src;
482 struct ccp_op op;
483 unsigned int dm_offset;
484 int ret;
485
486 if (!((aes->key_len == AES_KEYSIZE_128) ||
487 (aes->key_len == AES_KEYSIZE_192) ||
488 (aes->key_len == AES_KEYSIZE_256)))
489 return -EINVAL;
490
491 if (aes->src_len & (AES_BLOCK_SIZE - 1))
492 return -EINVAL;
493
494 if (aes->iv_len != AES_BLOCK_SIZE)
495 return -EINVAL;
496
497 if (!aes->key || !aes->iv || !aes->src)
498 return -EINVAL;
499
500 if (aes->cmac_final) {
501 if (aes->cmac_key_len != AES_BLOCK_SIZE)
502 return -EINVAL;
503
504 if (!aes->cmac_key)
505 return -EINVAL;
506 }
507
508 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
509 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
510
511 ret = -EIO;
512 memset(&op, 0, sizeof(op));
513 op.cmd_q = cmd_q;
514 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
515 op.sb_key = cmd_q->sb_key;
516 op.sb_ctx = cmd_q->sb_ctx;
517 op.init = 1;
518 op.u.aes.type = aes->type;
519 op.u.aes.mode = aes->mode;
520 op.u.aes.action = aes->action;
521
522 /* All supported key sizes fit in a single (32-byte) SB entry
523 * and must be in little endian format. Use the 256-bit byte
524 * swap passthru option to convert from big endian to little
525 * endian.
526 */
527 ret = ccp_init_dm_workarea(&key, cmd_q,
528 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
529 DMA_TO_DEVICE);
530 if (ret)
531 return ret;
532
533 dm_offset = CCP_SB_BYTES - aes->key_len;
534 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
535 if (ret)
536 goto e_key;
537 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
538 CCP_PASSTHRU_BYTESWAP_256BIT);
539 if (ret) {
540 cmd->engine_error = cmd_q->cmd_error;
541 goto e_key;
542 }
543
544 /* The AES context fits in a single (32-byte) SB entry and
545 * must be in little endian format. Use the 256-bit byte swap
546 * passthru option to convert from big endian to little endian.
547 */
548 ret = ccp_init_dm_workarea(&ctx, cmd_q,
549 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
550 DMA_BIDIRECTIONAL);
551 if (ret)
552 goto e_key;
553
554 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
555 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
556 if (ret)
557 goto e_ctx;
558 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
559 CCP_PASSTHRU_BYTESWAP_256BIT);
560 if (ret) {
561 cmd->engine_error = cmd_q->cmd_error;
562 goto e_ctx;
563 }
564
565 /* Send data to the CCP AES engine */
566 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
567 AES_BLOCK_SIZE, DMA_TO_DEVICE);
568 if (ret)
569 goto e_ctx;
570
571 while (src.sg_wa.bytes_left) {
572 ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
573 if (aes->cmac_final && !src.sg_wa.bytes_left) {
574 op.eom = 1;
575
576 /* Push the K1/K2 key to the CCP now */
577 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
578 op.sb_ctx,
579 CCP_PASSTHRU_BYTESWAP_256BIT);
580 if (ret) {
581 cmd->engine_error = cmd_q->cmd_error;
582 goto e_src;
583 }
584
585 ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
586 aes->cmac_key_len);
587 if (ret)
588 goto e_src;
589 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
590 CCP_PASSTHRU_BYTESWAP_256BIT);
591 if (ret) {
592 cmd->engine_error = cmd_q->cmd_error;
593 goto e_src;
594 }
595 }
596
597 ret = cmd_q->ccp->vdata->perform->aes(&op);
598 if (ret) {
599 cmd->engine_error = cmd_q->cmd_error;
600 goto e_src;
601 }
602
603 ccp_process_data(&src, NULL, &op);
604 }
605
606 /* Retrieve the AES context - convert from LE to BE using
607 * 32-byte (256-bit) byteswapping
608 */
609 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
610 CCP_PASSTHRU_BYTESWAP_256BIT);
611 if (ret) {
612 cmd->engine_error = cmd_q->cmd_error;
613 goto e_src;
614 }
615
616 /* ...but we only need AES_BLOCK_SIZE bytes */
617 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
618 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
619
620e_src:
621 ccp_free_data(&src, cmd_q);
622
623e_ctx:
624 ccp_dm_free(&ctx);
625
626e_key:
627 ccp_dm_free(&key);
628
629 return ret;
630}
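/* Rough sketch of how a caller (e.g. the ccp-crypto layer) might submit an
 * AES-CMAC request to the routine above; field names follow
 * include/linux/ccp.h and the sketch is illustrative rather than a
 * verbatim copy of that code:
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_128;
 *	cmd.u.aes.mode = CCP_AES_MODE_CMAC;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = &key_sg;		// AES key scatterlist
 *	cmd.u.aes.key_len = AES_KEYSIZE_128;
 *	cmd.u.aes.iv = &iv_sg;			// running CMAC value
 *	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
 *	cmd.u.aes.src = &data_sg;
 *	cmd.u.aes.src_len = data_len;		// multiple of AES_BLOCK_SIZE
 *	cmd.u.aes.cmac_final = 1;		// plus cmac_key/cmac_key_len
 *	ret = ccp_enqueue_cmd(&cmd);
 */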
631
632static noinline_for_stack int
633ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
634{
635 struct ccp_aes_engine *aes = &cmd->u.aes;
636 struct ccp_dm_workarea key, ctx, final_wa, tag;
637 struct ccp_data src, dst;
638 struct ccp_data aad;
639 struct ccp_op op;
640
641	__be64 *final;
642 unsigned int dm_offset;
643 unsigned int authsize;
644 unsigned int jobid;
645 unsigned int ilen;
646 bool in_place = true; /* Default value */
647 int ret;
648
649 struct scatterlist *p_inp, sg_inp[2];
650 struct scatterlist *p_tag, sg_tag[2];
651 struct scatterlist *p_outp, sg_outp[2];
652 struct scatterlist *p_aad;
653
654 if (!aes->iv)
655 return -EINVAL;
656
657 if (!((aes->key_len == AES_KEYSIZE_128) ||
658 (aes->key_len == AES_KEYSIZE_192) ||
659 (aes->key_len == AES_KEYSIZE_256)))
660 return -EINVAL;
661
662 if (!aes->key) /* Gotta have a key SGL */
663 return -EINVAL;
664
665 /* Zero defaults to 16 bytes, the maximum size */
666 authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
667 switch (authsize) {
668 case 16:
669 case 15:
670 case 14:
671 case 13:
672 case 12:
673 case 8:
674 case 4:
675 break;
676 default:
677 return -EINVAL;
678 }
679
680 /* First, decompose the source buffer into AAD & PT,
681 * and the destination buffer into AAD, CT & tag, or
682 * the input into CT & tag.
683 * It is expected that the input and output SGs will
684 * be valid, even if the AAD and input lengths are 0.
685 */
686 p_aad = aes->src;
687 p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
688 p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
689 if (aes->action == CCP_AES_ACTION_ENCRYPT) {
690 ilen = aes->src_len;
691 p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
692 } else {
693 /* Input length for decryption includes tag */
694 ilen = aes->src_len - authsize;
695 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
696 }
697
698 jobid = CCP_NEW_JOBID(cmd_q->ccp);
699
700 memset(&op, 0, sizeof(op));
701 op.cmd_q = cmd_q;
702 op.jobid = jobid;
703 op.sb_key = cmd_q->sb_key; /* Pre-allocated */
704 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
705 op.init = 1;
706 op.u.aes.type = aes->type;
707
708 /* Copy the key to the LSB */
709 ret = ccp_init_dm_workarea(&key, cmd_q,
710 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
711 DMA_TO_DEVICE);
712 if (ret)
713 return ret;
714
715 dm_offset = CCP_SB_BYTES - aes->key_len;
716 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
717 if (ret)
718 goto e_key;
719 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
720 CCP_PASSTHRU_BYTESWAP_256BIT);
721 if (ret) {
722 cmd->engine_error = cmd_q->cmd_error;
723 goto e_key;
724 }
725
726 /* Copy the context (IV) to the LSB.
727 * There is an assumption here that the IV is 96 bits in length, plus
728 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
729 */
730 ret = ccp_init_dm_workarea(&ctx, cmd_q,
731 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
732 DMA_BIDIRECTIONAL);
733 if (ret)
734 goto e_key;
735
736 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
737 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
738 if (ret)
739 goto e_ctx;
740
741 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
742 CCP_PASSTHRU_BYTESWAP_256BIT);
743 if (ret) {
744 cmd->engine_error = cmd_q->cmd_error;
745 goto e_ctx;
746 }
747
748 op.init = 1;
749 if (aes->aad_len > 0) {
750 /* Step 1: Run a GHASH over the Additional Authenticated Data */
751 ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
752 AES_BLOCK_SIZE,
753 DMA_TO_DEVICE);
754 if (ret)
755 goto e_ctx;
756
757 op.u.aes.mode = CCP_AES_MODE_GHASH;
758 op.u.aes.action = CCP_AES_GHASHAAD;
759
760 while (aad.sg_wa.bytes_left) {
761 ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
762
763 ret = cmd_q->ccp->vdata->perform->aes(&op);
764 if (ret) {
765 cmd->engine_error = cmd_q->cmd_error;
766 goto e_aad;
767 }
768
769 ccp_process_data(&aad, NULL, &op);
770 op.init = 0;
771 }
772 }
773
774 op.u.aes.mode = CCP_AES_MODE_GCTR;
775 op.u.aes.action = aes->action;
776
777 if (ilen > 0) {
778 /* Step 2: Run a GCTR over the plaintext */
779 in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
780
781 ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
782 AES_BLOCK_SIZE,
783 in_place ? DMA_BIDIRECTIONAL
784 : DMA_TO_DEVICE);
785 if (ret)
786 goto e_ctx;
787
788 if (in_place) {
789 dst = src;
790 } else {
791 ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
792 AES_BLOCK_SIZE, DMA_FROM_DEVICE);
793 if (ret)
794 goto e_src;
795 }
796
797 op.soc = 0;
798 op.eom = 0;
799 op.init = 1;
800 while (src.sg_wa.bytes_left) {
801 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
802 if (!src.sg_wa.bytes_left) {
803 unsigned int nbytes = ilen % AES_BLOCK_SIZE;
804
805 if (nbytes) {
806 op.eom = 1;
807 op.u.aes.size = (nbytes * 8) - 1;
808 }
809 }
810
811 ret = cmd_q->ccp->vdata->perform->aes(&op);
812 if (ret) {
813 cmd->engine_error = cmd_q->cmd_error;
814 goto e_dst;
815 }
816
817 ccp_process_data(&src, &dst, &op);
818 op.init = 0;
819 }
820 }
821
822 /* Step 3: Update the IV portion of the context with the original IV */
823 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
824 CCP_PASSTHRU_BYTESWAP_256BIT);
825 if (ret) {
826 cmd->engine_error = cmd_q->cmd_error;
827 goto e_dst;
828 }
829
830 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
831 if (ret)
832 goto e_dst;
833
834 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
835 CCP_PASSTHRU_BYTESWAP_256BIT);
836 if (ret) {
837 cmd->engine_error = cmd_q->cmd_error;
838 goto e_dst;
839 }
840
841 /* Step 4: Concatenate the lengths of the AAD and source, and
842 * hash that 16 byte buffer.
843 */
844 ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
845 DMA_BIDIRECTIONAL);
846 if (ret)
847 goto e_dst;
848	final = (__be64 *)final_wa.address;
849 final[0] = cpu_to_be64(aes->aad_len * 8);
850 final[1] = cpu_to_be64(ilen * 8);
851
852 memset(&op, 0, sizeof(op));
853 op.cmd_q = cmd_q;
854 op.jobid = jobid;
855 op.sb_key = cmd_q->sb_key; /* Pre-allocated */
856 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
857 op.init = 1;
858 op.u.aes.type = aes->type;
859 op.u.aes.mode = CCP_AES_MODE_GHASH;
860 op.u.aes.action = CCP_AES_GHASHFINAL;
861 op.src.type = CCP_MEMTYPE_SYSTEM;
862 op.src.u.dma.address = final_wa.dma.address;
863 op.src.u.dma.length = AES_BLOCK_SIZE;
864 op.dst.type = CCP_MEMTYPE_SYSTEM;
865 op.dst.u.dma.address = final_wa.dma.address;
866 op.dst.u.dma.length = AES_BLOCK_SIZE;
867 op.eom = 1;
868 op.u.aes.size = 0;
869 ret = cmd_q->ccp->vdata->perform->aes(&op);
870 if (ret)
871 goto e_dst;
872
873 if (aes->action == CCP_AES_ACTION_ENCRYPT) {
874 /* Put the ciphered tag after the ciphertext. */
875 ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
876 } else {
877 /* Does this ciphered tag match the input? */
878 ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
879 DMA_BIDIRECTIONAL);
880 if (ret)
881 goto e_tag;
882 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
883 if (ret)
884 goto e_tag;
885
886 ret = crypto_memneq(tag.address, final_wa.address,
887 authsize) ? -EBADMSG : 0;
888 ccp_dm_free(&tag);
889 }
890
891e_tag:
892 ccp_dm_free(&final_wa);
893
894e_dst:
895 if (ilen > 0 && !in_place)
896 ccp_free_data(&dst, cmd_q);
897
898e_src:
899 if (ilen > 0)
900 ccp_free_data(&src, cmd_q);
901
902e_aad:
903 if (aes->aad_len)
904 ccp_free_data(&aad, cmd_q);
905
906e_ctx:
907 ccp_dm_free(&ctx);
908
909e_key:
910 ccp_dm_free(&key);
911
912 return ret;
913}
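/* Buffer layout handled by ccp_run_aes_gcm_cmd() above (illustrative),
 * based on the scatterwalk_ffwd() splits at the top of the function:
 *
 *	encrypt:  src = [ AAD | plaintext ]
 *	          dst = [ AAD area (skipped) | ciphertext | tag ]
 *	decrypt:  src = [ AAD | ciphertext | tag ]
 *	          dst = [ AAD area (skipped) | plaintext ]
 *
 * The routine GHASHes the AAD, runs GCTR over the text, hashes the
 * 16-byte length block, and then either appends the tag (encrypt) or
 * compares it with crypto_memneq() (decrypt).
 */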
914
915static noinline_for_stack int
916ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
917{
918 struct ccp_aes_engine *aes = &cmd->u.aes;
919 struct ccp_dm_workarea key, ctx;
920 struct ccp_data src, dst;
921 struct ccp_op op;
922 unsigned int dm_offset;
923 bool in_place = false;
924 int ret;
925
926 if (!((aes->key_len == AES_KEYSIZE_128) ||
927 (aes->key_len == AES_KEYSIZE_192) ||
928 (aes->key_len == AES_KEYSIZE_256)))
929 return -EINVAL;
930
931 if (((aes->mode == CCP_AES_MODE_ECB) ||
932 (aes->mode == CCP_AES_MODE_CBC) ||
933 (aes->mode == CCP_AES_MODE_CFB)) &&
934 (aes->src_len & (AES_BLOCK_SIZE - 1)))
935 return -EINVAL;
936
937 if (!aes->key || !aes->src || !aes->dst)
938 return -EINVAL;
939
940 if (aes->mode != CCP_AES_MODE_ECB) {
941 if (aes->iv_len != AES_BLOCK_SIZE)
942 return -EINVAL;
943
944 if (!aes->iv)
945 return -EINVAL;
946 }
947
948 BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
949 BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
950
951 ret = -EIO;
952 memset(&op, 0, sizeof(op));
953 op.cmd_q = cmd_q;
954 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
955 op.sb_key = cmd_q->sb_key;
956 op.sb_ctx = cmd_q->sb_ctx;
957 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
958 op.u.aes.type = aes->type;
959 op.u.aes.mode = aes->mode;
960 op.u.aes.action = aes->action;
961
962 /* All supported key sizes fit in a single (32-byte) SB entry
963 * and must be in little endian format. Use the 256-bit byte
964 * swap passthru option to convert from big endian to little
965 * endian.
966 */
967 ret = ccp_init_dm_workarea(&key, cmd_q,
968 CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
969 DMA_TO_DEVICE);
970 if (ret)
971 return ret;
972
973 dm_offset = CCP_SB_BYTES - aes->key_len;
974 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
975 if (ret)
976 goto e_key;
977 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
978 CCP_PASSTHRU_BYTESWAP_256BIT);
979 if (ret) {
980 cmd->engine_error = cmd_q->cmd_error;
981 goto e_key;
982 }
983
984 /* The AES context fits in a single (32-byte) SB entry and
985 * must be in little endian format. Use the 256-bit byte swap
986 * passthru option to convert from big endian to little endian.
987 */
988 ret = ccp_init_dm_workarea(&ctx, cmd_q,
989 CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
990 DMA_BIDIRECTIONAL);
991 if (ret)
992 goto e_key;
993
994 if (aes->mode != CCP_AES_MODE_ECB) {
995 /* Load the AES context - convert to LE */
996 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
997 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
998 if (ret)
999 goto e_ctx;
1000 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1001 CCP_PASSTHRU_BYTESWAP_256BIT);
1002 if (ret) {
1003 cmd->engine_error = cmd_q->cmd_error;
1004 goto e_ctx;
1005 }
1006 }
1007 switch (aes->mode) {
1008 case CCP_AES_MODE_CFB: /* CFB128 only */
1009 case CCP_AES_MODE_CTR:
1010 op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
1011 break;
1012 default:
1013 op.u.aes.size = 0;
1014 }
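	/* Note (illustrative): the size field appears to hold the number of
	 * bits handled per block minus one, so a full 128-bit block is
	 * written as 127; the GCM path above uses the same encoding for a
	 * short final block, (nbytes * 8) - 1.
	 */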
1015
1016 /* Prepare the input and output data workareas. For in-place
1017 * operations we need to set the dma direction to BIDIRECTIONAL
1018 * and copy the src workarea to the dst workarea.
1019 */
1020 if (sg_virt(aes->src) == sg_virt(aes->dst))
1021 in_place = true;
1022
1023 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
1024 AES_BLOCK_SIZE,
1025 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1026 if (ret)
1027 goto e_ctx;
1028
1029 if (in_place) {
1030 dst = src;
1031 } else {
1032 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
1033 AES_BLOCK_SIZE, DMA_FROM_DEVICE);
1034 if (ret)
1035 goto e_src;
1036 }
1037
1038 /* Send data to the CCP AES engine */
1039 while (src.sg_wa.bytes_left) {
1040 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
1041 if (!src.sg_wa.bytes_left) {
1042 op.eom = 1;
1043
1044 /* Since we don't retrieve the AES context in ECB
1045 * mode we have to wait for the operation to complete
1046 * on the last piece of data
1047 */
1048 if (aes->mode == CCP_AES_MODE_ECB)
1049 op.soc = 1;
1050 }
1051
1052 ret = cmd_q->ccp->vdata->perform->aes(&op);
1053 if (ret) {
1054 cmd->engine_error = cmd_q->cmd_error;
1055 goto e_dst;
1056 }
1057
1058 ccp_process_data(&src, &dst, &op);
1059 }
1060
1061 if (aes->mode != CCP_AES_MODE_ECB) {
1062 /* Retrieve the AES context - convert from LE to BE using
1063 * 32-byte (256-bit) byteswapping
1064 */
1065 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1066 CCP_PASSTHRU_BYTESWAP_256BIT);
1067 if (ret) {
1068 cmd->engine_error = cmd_q->cmd_error;
1069 goto e_dst;
1070 }
1071
1072 /* ...but we only need AES_BLOCK_SIZE bytes */
1073 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
1074 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
1075 }
1076
1077e_dst:
1078 if (!in_place)
1079 ccp_free_data(&dst, cmd_q);
1080
1081e_src:
1082 ccp_free_data(&src, cmd_q);
1083
1084e_ctx:
1085 ccp_dm_free(&ctx);
1086
1087e_key:
1088 ccp_dm_free(&key);
1089
1090 return ret;
1091}
1092
1093static noinline_for_stack int
1094ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1095{
1096 struct ccp_xts_aes_engine *xts = &cmd->u.xts;
1097 struct ccp_dm_workarea key, ctx;
1098 struct ccp_data src, dst;
1099 struct ccp_op op;
1100 unsigned int unit_size, dm_offset;
1101 bool in_place = false;
1102 unsigned int sb_count;
1103 enum ccp_aes_type aestype;
1104 int ret;
1105
1106 switch (xts->unit_size) {
1107 case CCP_XTS_AES_UNIT_SIZE_16:
1108 unit_size = 16;
1109 break;
1110 case CCP_XTS_AES_UNIT_SIZE_512:
1111 unit_size = 512;
1112 break;
1113 case CCP_XTS_AES_UNIT_SIZE_1024:
1114 unit_size = 1024;
1115 break;
1116 case CCP_XTS_AES_UNIT_SIZE_2048:
1117 unit_size = 2048;
1118 break;
1119 case CCP_XTS_AES_UNIT_SIZE_4096:
1120 unit_size = 4096;
1121 break;
1122
1123 default:
1124 return -EINVAL;
1125 }
1126
1127 if (xts->key_len == AES_KEYSIZE_128)
1128 aestype = CCP_AES_TYPE_128;
1129 else if (xts->key_len == AES_KEYSIZE_256)
1130 aestype = CCP_AES_TYPE_256;
1131 else
1132 return -EINVAL;
1133
1134 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
1135 return -EINVAL;
1136
1137 if (xts->iv_len != AES_BLOCK_SIZE)
1138 return -EINVAL;
1139
1140 if (!xts->key || !xts->iv || !xts->src || !xts->dst)
1141 return -EINVAL;
1142
1143 BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
1144 BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);
1145
1146 ret = -EIO;
1147 memset(&op, 0, sizeof(op));
1148 op.cmd_q = cmd_q;
1149 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1150 op.sb_key = cmd_q->sb_key;
1151 op.sb_ctx = cmd_q->sb_ctx;
1152 op.init = 1;
1153 op.u.xts.type = aestype;
1154 op.u.xts.action = xts->action;
1155 op.u.xts.unit_size = xts->unit_size;
1156
1157 /* A version 3 device only supports 128-bit keys, which fits into a
1158 * single SB entry. A version 5 device uses a 512-bit vector, so two
1159 * SB entries.
1160 */
1161 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
1162 sb_count = CCP_XTS_AES_KEY_SB_COUNT;
1163 else
1164 sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
1165 ret = ccp_init_dm_workarea(&key, cmd_q,
1166 sb_count * CCP_SB_BYTES,
1167 DMA_TO_DEVICE);
1168 if (ret)
1169 return ret;
1170
1171 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
1172 /* All supported key sizes must be in little endian format.
1173 * Use the 256-bit byte swap passthru option to convert from
1174 * big endian to little endian.
1175 */
1176 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
1177 ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
1178 if (ret)
1179 goto e_key;
1180 ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
1181 if (ret)
1182 goto e_key;
1183 } else {
1184 /* Version 5 CCPs use a 512-bit space for the key: each portion
1185 * occupies 256 bits, or one entire slot, and is zero-padded.
1186 */
1187 unsigned int pad;
1188
1189 dm_offset = CCP_SB_BYTES;
1190 pad = dm_offset - xts->key_len;
1191 ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
1192 if (ret)
1193 goto e_key;
1194 ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
1195 xts->key_len, xts->key_len);
1196 if (ret)
1197 goto e_key;
1198 }
1199 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
1200 CCP_PASSTHRU_BYTESWAP_256BIT);
1201 if (ret) {
1202 cmd->engine_error = cmd_q->cmd_error;
1203 goto e_key;
1204 }
1205
1206 /* The AES context fits in a single (32-byte) SB entry and
1207 * for XTS is already in little endian format so no byte swapping
1208 * is needed.
1209 */
1210 ret = ccp_init_dm_workarea(&ctx, cmd_q,
1211 CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
1212 DMA_BIDIRECTIONAL);
1213 if (ret)
1214 goto e_key;
1215
1216 ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
1217 if (ret)
1218 goto e_ctx;
1219 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1220 CCP_PASSTHRU_BYTESWAP_NOOP);
1221 if (ret) {
1222 cmd->engine_error = cmd_q->cmd_error;
1223 goto e_ctx;
1224 }
1225
1226 /* Prepare the input and output data workareas. For in-place
1227 * operations we need to set the dma direction to BIDIRECTIONAL
1228 * and copy the src workarea to the dst workarea.
1229 */
1230 if (sg_virt(xts->src) == sg_virt(xts->dst))
1231 in_place = true;
1232
1233 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
1234 unit_size,
1235 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1236 if (ret)
1237 goto e_ctx;
1238
1239 if (in_place) {
1240 dst = src;
1241 } else {
1242 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
1243 unit_size, DMA_FROM_DEVICE);
1244 if (ret)
1245 goto e_src;
1246 }
1247
1248 /* Send data to the CCP AES engine */
1249 while (src.sg_wa.bytes_left) {
1250 ccp_prepare_data(&src, &dst, &op, unit_size, true);
1251 if (!src.sg_wa.bytes_left)
1252 op.eom = 1;
1253
1254 ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
1255 if (ret) {
1256 cmd->engine_error = cmd_q->cmd_error;
1257 goto e_dst;
1258 }
1259
1260 ccp_process_data(&src, &dst, &op);
1261 }
1262
1263 /* Retrieve the AES context - convert from LE to BE using
1264 * 32-byte (256-bit) byteswapping
1265 */
1266 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1267 CCP_PASSTHRU_BYTESWAP_256BIT);
1268 if (ret) {
1269 cmd->engine_error = cmd_q->cmd_error;
1270 goto e_dst;
1271 }
1272
1273 /* ...but we only need AES_BLOCK_SIZE bytes */
1274 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
1275 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
1276
1277e_dst:
1278 if (!in_place)
1279 ccp_free_data(&dst, cmd_q);
1280
1281e_src:
1282 ccp_free_data(&src, cmd_q);
1283
1284e_ctx:
1285 ccp_dm_free(&ctx);
1286
1287e_key:
1288 ccp_dm_free(&key);
1289
1290 return ret;
1291}
1292
1293static noinline_for_stack int
1294ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1295{
1296 struct ccp_des3_engine *des3 = &cmd->u.des3;
1297
1298 struct ccp_dm_workarea key, ctx;
1299 struct ccp_data src, dst;
1300 struct ccp_op op;
1301 unsigned int dm_offset;
1302 unsigned int len_singlekey;
1303 bool in_place = false;
1304 int ret;
1305
1306 /* Error checks */
1307 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
1308 return -EINVAL;
1309
1310 if (!cmd_q->ccp->vdata->perform->des3)
1311 return -EINVAL;
1312
1313 if (des3->key_len != DES3_EDE_KEY_SIZE)
1314 return -EINVAL;
1315
1316 if (((des3->mode == CCP_DES3_MODE_ECB) ||
1317 (des3->mode == CCP_DES3_MODE_CBC)) &&
1318 (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
1319 return -EINVAL;
1320
1321 if (!des3->key || !des3->src || !des3->dst)
1322 return -EINVAL;
1323
1324 if (des3->mode != CCP_DES3_MODE_ECB) {
1325 if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
1326 return -EINVAL;
1327
1328 if (!des3->iv)
1329 return -EINVAL;
1330 }
1331
1332 ret = -EIO;
1333 /* Zero out all the fields of the command desc */
1334 memset(&op, 0, sizeof(op));
1335
1336 /* Set up the Function field */
1337 op.cmd_q = cmd_q;
1338 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1339 op.sb_key = cmd_q->sb_key;
1340
1341 op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
1342 op.u.des3.type = des3->type;
1343 op.u.des3.mode = des3->mode;
1344 op.u.des3.action = des3->action;
1345
1346 /*
1347 * All supported key sizes fit in a single (32-byte) KSB entry and
1348 * (like AES) must be in little endian format. Use the 256-bit byte
1349 * swap passthru option to convert from big endian to little endian.
1350 */
1351 ret = ccp_init_dm_workarea(&key, cmd_q,
1352 CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
1353 DMA_TO_DEVICE);
1354 if (ret)
1355 return ret;
1356
1357 /*
1358 * The contents of the key triplet are in the reverse order of what
1359 * is required by the engine. Copy the 3 pieces individually to put
1360 * them where they belong.
1361 */
1362 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
1363
1364 len_singlekey = des3->key_len / 3;
1365 ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
1366 des3->key, 0, len_singlekey);
1367 if (ret)
1368 goto e_key;
1369 ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
1370 des3->key, len_singlekey, len_singlekey);
1371 if (ret)
1372 goto e_key;
1373 ret = ccp_set_dm_area(&key, dm_offset,
1374 des3->key, 2 * len_singlekey, len_singlekey);
1375 if (ret)
1376 goto e_key;
1377
1378 /* Copy the key to the SB */
1379 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
1380 CCP_PASSTHRU_BYTESWAP_256BIT);
1381 if (ret) {
1382 cmd->engine_error = cmd_q->cmd_error;
1383 goto e_key;
1384 }
1385
1386 /*
1387 * The DES3 context fits in a single (32-byte) KSB entry and
1388 * must be in little endian format. Use the 256-bit byte swap
1389 * passthru option to convert from big endian to little endian.
1390 */
1391 if (des3->mode != CCP_DES3_MODE_ECB) {
1392 op.sb_ctx = cmd_q->sb_ctx;
1393
1394 ret = ccp_init_dm_workarea(&ctx, cmd_q,
1395 CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
1396 DMA_BIDIRECTIONAL);
1397 if (ret)
1398 goto e_key;
1399
1400 /* Load the context into the LSB */
1401 dm_offset = CCP_SB_BYTES - des3->iv_len;
1402 ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
1403 des3->iv_len);
1404 if (ret)
1405 goto e_ctx;
1406
1407 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1408 CCP_PASSTHRU_BYTESWAP_256BIT);
1409 if (ret) {
1410 cmd->engine_error = cmd_q->cmd_error;
1411 goto e_ctx;
1412 }
1413 }
1414
1415 /*
1416 * Prepare the input and output data workareas. For in-place
1417 * operations we need to set the dma direction to BIDIRECTIONAL
1418 * and copy the src workarea to the dst workarea.
1419 */
1420 if (sg_virt(des3->src) == sg_virt(des3->dst))
1421 in_place = true;
1422
1423 ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
1424 DES3_EDE_BLOCK_SIZE,
1425 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1426 if (ret)
1427 goto e_ctx;
1428
1429 if (in_place)
1430 dst = src;
1431 else {
1432 ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
1433 DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
1434 if (ret)
1435 goto e_src;
1436 }
1437
1438 /* Send data to the CCP DES3 engine */
1439 while (src.sg_wa.bytes_left) {
1440 ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
1441 if (!src.sg_wa.bytes_left) {
1442 op.eom = 1;
1443
1444 /* Since we don't retrieve the context in ECB mode
1445 * we have to wait for the operation to complete
1446 * on the last piece of data
1447 */
1448 op.soc = 0;
1449 }
1450
1451 ret = cmd_q->ccp->vdata->perform->des3(&op);
1452 if (ret) {
1453 cmd->engine_error = cmd_q->cmd_error;
1454 goto e_dst;
1455 }
1456
1457 ccp_process_data(&src, &dst, &op);
1458 }
1459
1460 if (des3->mode != CCP_DES3_MODE_ECB) {
1461 /* Retrieve the context and make BE */
1462 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1463 CCP_PASSTHRU_BYTESWAP_256BIT);
1464 if (ret) {
1465 cmd->engine_error = cmd_q->cmd_error;
1466 goto e_dst;
1467 }
1468
1469 /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
1470 ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
1471 DES3_EDE_BLOCK_SIZE);
1472 }
1473e_dst:
1474 if (!in_place)
1475 ccp_free_data(&dst, cmd_q);
1476
1477e_src:
1478 ccp_free_data(&src, cmd_q);
1479
1480e_ctx:
1481 if (des3->mode != CCP_DES3_MODE_ECB)
1482 ccp_dm_free(&ctx);
1483
1484e_key:
1485 ccp_dm_free(&key);
1486
1487 return ret;
1488}
1489
1490static noinline_for_stack int
1491ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1492{
1493 struct ccp_sha_engine *sha = &cmd->u.sha;
1494 struct ccp_dm_workarea ctx;
1495 struct ccp_data src;
1496 struct ccp_op op;
1497 unsigned int ioffset, ooffset;
1498 unsigned int digest_size;
1499 int sb_count;
1500 const void *init;
1501 u64 block_size;
1502 int ctx_size;
1503 int ret;
1504
1505 switch (sha->type) {
1506 case CCP_SHA_TYPE_1:
1507 if (sha->ctx_len < SHA1_DIGEST_SIZE)
1508 return -EINVAL;
1509 block_size = SHA1_BLOCK_SIZE;
1510 break;
1511 case CCP_SHA_TYPE_224:
1512 if (sha->ctx_len < SHA224_DIGEST_SIZE)
1513 return -EINVAL;
1514 block_size = SHA224_BLOCK_SIZE;
1515 break;
1516 case CCP_SHA_TYPE_256:
1517 if (sha->ctx_len < SHA256_DIGEST_SIZE)
1518 return -EINVAL;
1519 block_size = SHA256_BLOCK_SIZE;
1520 break;
1521 case CCP_SHA_TYPE_384:
1522 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
1523 || sha->ctx_len < SHA384_DIGEST_SIZE)
1524 return -EINVAL;
1525 block_size = SHA384_BLOCK_SIZE;
1526 break;
1527 case CCP_SHA_TYPE_512:
1528 if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
1529 || sha->ctx_len < SHA512_DIGEST_SIZE)
1530 return -EINVAL;
1531 block_size = SHA512_BLOCK_SIZE;
1532 break;
1533 default:
1534 return -EINVAL;
1535 }
1536
1537 if (!sha->ctx)
1538 return -EINVAL;
1539
1540 if (!sha->final && (sha->src_len & (block_size - 1)))
1541 return -EINVAL;
1542
1543 /* The version 3 device can't handle zero-length input */
1544 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
1545
1546 if (!sha->src_len) {
1547 unsigned int digest_len;
1548 const u8 *sha_zero;
1549
1550 /* Not final, just return */
1551 if (!sha->final)
1552 return 0;
1553
1554 /* CCP can't do a zero length sha operation so the
1555 * caller must buffer the data.
1556 */
1557 if (sha->msg_bits)
1558 return -EINVAL;
1559
1560 /* The CCP cannot perform zero-length sha operations
1561 * so the caller is required to buffer data for the
1562 * final operation. However, a sha operation for a
1563 * message with a total length of zero is valid so
1564 * known values are required to supply the result.
1565 */
1566 switch (sha->type) {
1567 case CCP_SHA_TYPE_1:
1568 sha_zero = sha1_zero_message_hash;
1569 digest_len = SHA1_DIGEST_SIZE;
1570 break;
1571 case CCP_SHA_TYPE_224:
1572 sha_zero = sha224_zero_message_hash;
1573 digest_len = SHA224_DIGEST_SIZE;
1574 break;
1575 case CCP_SHA_TYPE_256:
1576 sha_zero = sha256_zero_message_hash;
1577 digest_len = SHA256_DIGEST_SIZE;
1578 break;
1579 default:
1580 return -EINVAL;
1581 }
1582
1583 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
1584 digest_len, 1);
1585
1586 return 0;
1587 }
1588 }
1589
1590 /* Set variables used throughout */
1591 switch (sha->type) {
1592 case CCP_SHA_TYPE_1:
1593 digest_size = SHA1_DIGEST_SIZE;
1594 init = (void *) ccp_sha1_init;
1595 ctx_size = SHA1_DIGEST_SIZE;
1596 sb_count = 1;
1597 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
1598 ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
1599 else
1600 ooffset = ioffset = 0;
1601 break;
1602 case CCP_SHA_TYPE_224:
1603 digest_size = SHA224_DIGEST_SIZE;
1604 init = (void *) ccp_sha224_init;
1605 ctx_size = SHA256_DIGEST_SIZE;
1606 sb_count = 1;
1607 ioffset = 0;
1608 if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
1609 ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
1610 else
1611 ooffset = 0;
1612 break;
1613 case CCP_SHA_TYPE_256:
1614 digest_size = SHA256_DIGEST_SIZE;
1615 init = (void *) ccp_sha256_init;
1616 ctx_size = SHA256_DIGEST_SIZE;
1617 sb_count = 1;
1618 ooffset = ioffset = 0;
1619 break;
1620 case CCP_SHA_TYPE_384:
1621 digest_size = SHA384_DIGEST_SIZE;
1622 init = (void *) ccp_sha384_init;
1623 ctx_size = SHA512_DIGEST_SIZE;
1624 sb_count = 2;
1625 ioffset = 0;
1626 ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
1627 break;
1628 case CCP_SHA_TYPE_512:
1629 digest_size = SHA512_DIGEST_SIZE;
1630 init = (void *) ccp_sha512_init;
1631 ctx_size = SHA512_DIGEST_SIZE;
1632 sb_count = 2;
1633 ooffset = ioffset = 0;
1634 break;
1635 default:
1636 ret = -EINVAL;
1637 goto e_data;
1638 }
1639
1640 /* For zero-length plaintext the src pointer is ignored;
1641 * otherwise both parts must be valid
1642 */
1643 if (sha->src_len && !sha->src)
1644 return -EINVAL;
1645
1646 memset(&op, 0, sizeof(op));
1647 op.cmd_q = cmd_q;
1648 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1649 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
1650 op.u.sha.type = sha->type;
1651 op.u.sha.msg_bits = sha->msg_bits;
1652
1653 /* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
1654 * SHA384/512 require 2 adjacent SB slots, with the right half in the
1655 * first slot, and the left half in the second. Each portion must then
1656 * be in little endian format: use the 256-bit byte swap option.
1657 */
1658 ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
1659 DMA_BIDIRECTIONAL);
1660 if (ret)
1661 return ret;
1662 if (sha->first) {
1663 switch (sha->type) {
1664 case CCP_SHA_TYPE_1:
1665 case CCP_SHA_TYPE_224:
1666 case CCP_SHA_TYPE_256:
1667 memcpy(ctx.address + ioffset, init, ctx_size);
1668 break;
1669 case CCP_SHA_TYPE_384:
1670 case CCP_SHA_TYPE_512:
1671 memcpy(ctx.address + ctx_size / 2, init,
1672 ctx_size / 2);
1673 memcpy(ctx.address, init + ctx_size / 2,
1674 ctx_size / 2);
1675 break;
1676 default:
1677 ret = -EINVAL;
1678 goto e_ctx;
1679 }
1680 } else {
1681 /* Restore the context */
1682 ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
1683 sb_count * CCP_SB_BYTES);
1684 if (ret)
1685 goto e_ctx;
1686 }
1687
1688 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1689 CCP_PASSTHRU_BYTESWAP_256BIT);
1690 if (ret) {
1691 cmd->engine_error = cmd_q->cmd_error;
1692 goto e_ctx;
1693 }
1694
1695 if (sha->src) {
1696 /* Send data to the CCP SHA engine; block_size is set above */
1697 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
1698 block_size, DMA_TO_DEVICE);
1699 if (ret)
1700 goto e_ctx;
1701
1702 while (src.sg_wa.bytes_left) {
1703 ccp_prepare_data(&src, NULL, &op, block_size, false);
1704 if (sha->final && !src.sg_wa.bytes_left)
1705 op.eom = 1;
1706
1707 ret = cmd_q->ccp->vdata->perform->sha(&op);
1708 if (ret) {
1709 cmd->engine_error = cmd_q->cmd_error;
1710 goto e_data;
1711 }
1712
1713 ccp_process_data(&src, NULL, &op);
1714 }
1715 } else {
1716 op.eom = 1;
1717 ret = cmd_q->ccp->vdata->perform->sha(&op);
1718 if (ret) {
1719 cmd->engine_error = cmd_q->cmd_error;
1720 goto e_data;
1721 }
1722 }
1723
1724 /* Retrieve the SHA context - convert from LE to BE using
1725	 * 32-byte (256-bit) byteswapping
1726 */
1727 ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1728 CCP_PASSTHRU_BYTESWAP_256BIT);
1729 if (ret) {
1730 cmd->engine_error = cmd_q->cmd_error;
1731 goto e_data;
1732 }
1733
1734 if (sha->final) {
1735 /* Finishing up, so get the digest */
1736 switch (sha->type) {
1737 case CCP_SHA_TYPE_1:
1738 case CCP_SHA_TYPE_224:
1739 case CCP_SHA_TYPE_256:
1740 ccp_get_dm_area(&ctx, ooffset,
1741 sha->ctx, 0,
1742 digest_size);
1743 break;
1744 case CCP_SHA_TYPE_384:
1745 case CCP_SHA_TYPE_512:
1746 ccp_get_dm_area(&ctx, 0,
1747 sha->ctx, LSB_ITEM_SIZE - ooffset,
1748 LSB_ITEM_SIZE);
1749 ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
1750 sha->ctx, 0,
1751 LSB_ITEM_SIZE - ooffset);
1752 break;
1753 default:
1754 ret = -EINVAL;
1755 goto e_ctx;
1756 }
1757 } else {
1758 /* Stash the context */
1759 ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
1760 sb_count * CCP_SB_BYTES);
1761 }
1762
1763 if (sha->final && sha->opad) {
1764 /* HMAC operation, recursively perform final SHA */
1765 struct ccp_cmd hmac_cmd;
1766 struct scatterlist sg;
1767 u8 *hmac_buf;
1768
1769 if (sha->opad_len != block_size) {
1770 ret = -EINVAL;
1771 goto e_data;
1772 }
1773
1774 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
1775 if (!hmac_buf) {
1776 ret = -ENOMEM;
1777 goto e_data;
1778 }
1779 sg_init_one(&sg, hmac_buf, block_size + digest_size);
1780
1781 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
1782 switch (sha->type) {
1783 case CCP_SHA_TYPE_1:
1784 case CCP_SHA_TYPE_224:
1785 case CCP_SHA_TYPE_256:
1786 memcpy(hmac_buf + block_size,
1787 ctx.address + ooffset,
1788 digest_size);
1789 break;
1790 case CCP_SHA_TYPE_384:
1791 case CCP_SHA_TYPE_512:
1792 memcpy(hmac_buf + block_size,
1793 ctx.address + LSB_ITEM_SIZE + ooffset,
1794 LSB_ITEM_SIZE);
1795 memcpy(hmac_buf + block_size +
1796 (LSB_ITEM_SIZE - ooffset),
1797 ctx.address,
1798 LSB_ITEM_SIZE);
1799 break;
1800 default:
1801 kfree(hmac_buf);
1802 ret = -EINVAL;
1803 goto e_data;
1804 }
1805
1806 memset(&hmac_cmd, 0, sizeof(hmac_cmd));
1807 hmac_cmd.engine = CCP_ENGINE_SHA;
1808 hmac_cmd.u.sha.type = sha->type;
1809 hmac_cmd.u.sha.ctx = sha->ctx;
1810 hmac_cmd.u.sha.ctx_len = sha->ctx_len;
1811 hmac_cmd.u.sha.src = &sg;
1812 hmac_cmd.u.sha.src_len = block_size + digest_size;
1813 hmac_cmd.u.sha.opad = NULL;
1814 hmac_cmd.u.sha.opad_len = 0;
1815 hmac_cmd.u.sha.first = 1;
1816 hmac_cmd.u.sha.final = 1;
1817 hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
1818
1819 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
1820 if (ret)
1821 cmd->engine_error = hmac_cmd.engine_error;
1822
1823 kfree(hmac_buf);
1824 }
1825
1826e_data:
1827 if (sha->src)
1828 ccp_free_data(&src, cmd_q);
1829
1830e_ctx:
1831 ccp_dm_free(&ctx);
1832
1833 return ret;
1834}
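/* HMAC note (illustrative): the recursive call above implements the outer
 * hash of HMAC, H((K ^ opad) || H((K ^ ipad) || msg)).  The caller supplies
 * sha->opad already XORed with the key; this routine appends the freshly
 * computed inner digest to it in hmac_buf and runs one more first+final
 * SHA pass of (block_size + digest_size) bytes over that buffer.
 */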
1835
1836static noinline_for_stack int
1837ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1838{
1839 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
1840 struct ccp_dm_workarea exp, src, dst;
1841 struct ccp_op op;
1842 unsigned int sb_count, i_len, o_len;
1843 int ret;
1844
1845 /* Check against the maximum allowable size, in bits */
1846 if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
1847 return -EINVAL;
1848
1849 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
1850 return -EINVAL;
1851
1852 memset(&op, 0, sizeof(op));
1853 op.cmd_q = cmd_q;
1854 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1855
1856 /* The RSA modulus must precede the message being acted upon, so
1857 * it must be copied to a DMA area where the message and the
1858 * modulus can be concatenated. Therefore the input buffer
1859 * length required is twice the output buffer length (which
1860 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
1861 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
1862 * required.
1863 */
1864 o_len = 32 * ((rsa->key_size + 255) / 256);
1865 i_len = o_len * 2;
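	/* Worked example (illustrative): for a 2048-bit key,
	 * o_len = 32 * ((2048 + 255) / 256) = 256 bytes and i_len = 512
	 * bytes; on a version 3 device that means o_len / CCP_SB_BYTES = 8
	 * storage block slots for the modulus.
	 */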
1866
1867 sb_count = 0;
1868 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1869 /* sb_count is the number of storage block slots required
1870 * for the modulus.
1871 */
1872 sb_count = o_len / CCP_SB_BYTES;
1873 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
1874 sb_count);
1875 if (!op.sb_key)
1876 return -EIO;
1877 } else {
1878 /* A version 5 device allows a modulus size that will not fit
1879 * in the LSB, so the command will transfer it from memory.
1880 * Set the sb key to the default, even though it's not used.
1881 */
1882 op.sb_key = cmd_q->sb_key;
1883 }
1884
1885 /* The RSA exponent must be in little endian format. Reverse its
1886 * byte order.
1887 */
1888 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1889 if (ret)
1890 goto e_sb;
1891
1892 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
1893 if (ret)
1894 goto e_exp;
1895
1896 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1897 /* Copy the exponent to the local storage block, using
1898 * as many 32-byte blocks as were allocated above. It's
1899 * already little endian, so no further change is required.
1900 */
1901 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
1902 CCP_PASSTHRU_BYTESWAP_NOOP);
1903 if (ret) {
1904 cmd->engine_error = cmd_q->cmd_error;
1905 goto e_exp;
1906 }
1907 } else {
1908 /* The exponent can be retrieved from memory via DMA. */
1909 op.exp.u.dma.address = exp.dma.address;
1910 op.exp.u.dma.offset = 0;
1911 }
1912
1913 /* Concatenate the modulus and the message. Both the modulus and
1914 * the operands must be in little endian format. Since the input
1915 * is in big endian format it must be converted.
1916 */
1917 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
1918 if (ret)
1919 goto e_exp;
1920
1921 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
1922 if (ret)
1923 goto e_src;
1924 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
1925 if (ret)
1926 goto e_src;
1927
1928 /* Prepare the output area for the operation */
1929 ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
1930 if (ret)
1931 goto e_src;
1932
1933 op.soc = 1;
1934 op.src.u.dma.address = src.dma.address;
1935 op.src.u.dma.offset = 0;
1936 op.src.u.dma.length = i_len;
1937 op.dst.u.dma.address = dst.dma.address;
1938 op.dst.u.dma.offset = 0;
1939 op.dst.u.dma.length = o_len;
1940
1941 op.u.rsa.mod_size = rsa->key_size;
1942 op.u.rsa.input_len = i_len;
1943
1944 ret = cmd_q->ccp->vdata->perform->rsa(&op);
1945 if (ret) {
1946 cmd->engine_error = cmd_q->cmd_error;
1947 goto e_dst;
1948 }
1949
1950 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
1951
1952e_dst:
1953 ccp_dm_free(&dst);
1954
1955e_src:
1956 ccp_dm_free(&src);
1957
1958e_exp:
1959 ccp_dm_free(&exp);
1960
1961e_sb:
1962 if (sb_count)
1963 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
1964
1965 return ret;
1966}
1967
1968static noinline_for_stack int
1969ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1970{
1971 struct ccp_passthru_engine *pt = &cmd->u.passthru;
1972 struct ccp_dm_workarea mask;
1973 struct ccp_data src, dst;
1974 struct ccp_op op;
1975 bool in_place = false;
1976 unsigned int i;
1977 int ret = 0;
1978
1979 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1980 return -EINVAL;
1981
1982 if (!pt->src || !pt->dst)
1983 return -EINVAL;
1984
1985 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1986 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1987 return -EINVAL;
1988 if (!pt->mask)
1989 return -EINVAL;
1990 }
1991
1992 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
1993
1994 memset(&op, 0, sizeof(op));
1995 op.cmd_q = cmd_q;
1996 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1997
1998 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1999 /* Load the mask */
2000 op.sb_key = cmd_q->sb_key;
2001
2002 ret = ccp_init_dm_workarea(&mask, cmd_q,
2003 CCP_PASSTHRU_SB_COUNT *
2004 CCP_SB_BYTES,
2005 DMA_TO_DEVICE);
2006 if (ret)
2007 return ret;
2008
2009 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
2010 if (ret)
2011 goto e_mask;
2012 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
2013 CCP_PASSTHRU_BYTESWAP_NOOP);
2014 if (ret) {
2015 cmd->engine_error = cmd_q->cmd_error;
2016 goto e_mask;
2017 }
2018 }
2019
2020 /* Prepare the input and output data workareas. For in-place
2021 * operations we need to set the dma direction to BIDIRECTIONAL
2022 * and copy the src workarea to the dst workarea.
2023 */
2024 if (sg_virt(pt->src) == sg_virt(pt->dst))
2025 in_place = true;
2026
2027 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
2028 CCP_PASSTHRU_MASKSIZE,
2029 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
2030 if (ret)
2031 goto e_mask;
2032
2033 if (in_place) {
2034 dst = src;
2035 } else {
2036 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
2037 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
2038 if (ret)
2039 goto e_src;
2040 }
2041
2042 /* Send data to the CCP Passthru engine
2043 * Because the CCP engine works on a single source and destination
2044 * dma address at a time, each entry in the source scatterlist
2045 * (after the dma_map_sg call) must be less than or equal to the
2046 * (remaining) length in the destination scatterlist entry and the
2047 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
2048 */
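	/* For example (illustrative): a 64-byte source entry targeted at a
	 * 256-byte destination entry passes; a 128-byte source entry paired
	 * with a 64-byte destination entry fails the length check below
	 * with -EINVAL.
	 */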
2049 dst.sg_wa.sg_used = 0;
2050 for (i = 1; i <= src.sg_wa.dma_count; i++) {
2051 if (!dst.sg_wa.sg ||
2052 (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
2053 ret = -EINVAL;
2054 goto e_dst;
2055 }
2056
2057 if (i == src.sg_wa.dma_count) {
2058 op.eom = 1;
2059 op.soc = 1;
2060 }
2061
2062 op.src.type = CCP_MEMTYPE_SYSTEM;
2063 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
2064 op.src.u.dma.offset = 0;
2065 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
2066
2067 op.dst.type = CCP_MEMTYPE_SYSTEM;
2068 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
2069 op.dst.u.dma.offset = dst.sg_wa.sg_used;
2070 op.dst.u.dma.length = op.src.u.dma.length;
2071
2072 ret = cmd_q->ccp->vdata->perform->passthru(&op);
2073 if (ret) {
2074 cmd->engine_error = cmd_q->cmd_error;
2075 goto e_dst;
2076 }
2077
2078 dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
2079 if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
2080 dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
2081 dst.sg_wa.sg_used = 0;
2082 }
2083 src.sg_wa.sg = sg_next(src.sg_wa.sg);
2084 }
2085
2086e_dst:
2087 if (!in_place)
2088 ccp_free_data(&dst, cmd_q);
2089
2090e_src:
2091 ccp_free_data(&src, cmd_q);
2092
2093e_mask:
2094 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
2095 ccp_dm_free(&mask);
2096
2097 return ret;
2098}
2099
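/* Run a pass-through command on buffers the caller has already DMA
 * mapped (CCP_CMD_PASSTHRU_NO_DMA_MAP).  No scatterlist walking is
 * done here: the supplied source and destination DMA addresses are
 * handed to the engine as a single operation, and any bitwise mask is
 * likewise expected to be a DMA address that can be copied straight
 * into the SB key slot.
 */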
2100static noinline_for_stack int
2101ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
2102 struct ccp_cmd *cmd)
2103{
2104 struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
2105 struct ccp_dm_workarea mask;
2106 struct ccp_op op;
2107 int ret;
2108
2109 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
2110 return -EINVAL;
2111
2112 if (!pt->src_dma || !pt->dst_dma)
2113 return -EINVAL;
2114
2115 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
2116 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
2117 return -EINVAL;
2118 if (!pt->mask)
2119 return -EINVAL;
2120 }
2121
2122 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
2123
2124 memset(&op, 0, sizeof(op));
2125 op.cmd_q = cmd_q;
2126 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
2127
2128 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
2129 /* Load the mask */
2130 op.sb_key = cmd_q->sb_key;
2131
2132 mask.length = pt->mask_len;
2133 mask.dma.address = pt->mask;
2134 mask.dma.length = pt->mask_len;
2135
2136 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
2137 CCP_PASSTHRU_BYTESWAP_NOOP);
2138 if (ret) {
2139 cmd->engine_error = cmd_q->cmd_error;
2140 return ret;
2141 }
2142 }
2143
2144 /* Send data to the CCP Passthru engine */
2145 op.eom = 1;
2146 op.soc = 1;
2147
2148 op.src.type = CCP_MEMTYPE_SYSTEM;
2149 op.src.u.dma.address = pt->src_dma;
2150 op.src.u.dma.offset = 0;
2151 op.src.u.dma.length = pt->src_len;
2152
2153 op.dst.type = CCP_MEMTYPE_SYSTEM;
2154 op.dst.u.dma.address = pt->dst_dma;
2155 op.dst.u.dma.offset = 0;
2156 op.dst.u.dma.length = pt->src_len;
2157
2158 ret = cmd_q->ccp->vdata->perform->passthru(&op);
2159 if (ret)
2160 cmd->engine_error = cmd_q->cmd_error;
2161
2162 return ret;
2163}
2164
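/* Run an ECC modular-math command (MMUL, MADD or MINV on 384-bit
 * operands).  The modulus and operands are byte-reversed into a single
 * fixed-layout source buffer, the engine is invoked once, and the
 * little-endian result is reversed back into the caller's buffer.
 */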
2165static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2166{
2167 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2168 struct ccp_dm_workarea src, dst;
2169 struct ccp_op op;
2170 int ret;
2171 u8 *save;
2172
2173 if (!ecc->u.mm.operand_1 ||
2174 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
2175 return -EINVAL;
2176
2177 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
2178 if (!ecc->u.mm.operand_2 ||
2179 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
2180 return -EINVAL;
2181
2182 if (!ecc->u.mm.result ||
2183 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
2184 return -EINVAL;
2185
2186 memset(&op, 0, sizeof(op));
2187 op.cmd_q = cmd_q;
2188 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
2189
2190	/* Concatenate the modulus and the operands. Both the modulus and
2191	 * the operands must be in little-endian format. Since the input
2192	 * is in big-endian format, it must be converted and placed in a
2193	 * fixed-length buffer.
2194	 */
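	/* The resulting layout, with each field starting on a
	 * CCP_ECC_OPERAND_SIZE boundary, is:
	 *
	 *   [ modulus ][ operand_1 ][ operand_2 (omitted for MINV) ]
	 */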
2195 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
2196 DMA_TO_DEVICE);
2197 if (ret)
2198 return ret;
2199
2200 /* Save the workarea address since it is updated in order to perform
2201 * the concatenation
2202 */
2203 save = src.address;
2204
2205 /* Copy the ECC modulus */
2206 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
2207 if (ret)
2208 goto e_src;
2209 src.address += CCP_ECC_OPERAND_SIZE;
2210
2211 /* Copy the first operand */
2212 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
2213 ecc->u.mm.operand_1_len);
2214 if (ret)
2215 goto e_src;
2216 src.address += CCP_ECC_OPERAND_SIZE;
2217
2218 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
2219 /* Copy the second operand */
2220 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
2221 ecc->u.mm.operand_2_len);
2222 if (ret)
2223 goto e_src;
2224 src.address += CCP_ECC_OPERAND_SIZE;
2225 }
2226
2227 /* Restore the workarea address */
2228 src.address = save;
2229
2230 /* Prepare the output area for the operation */
2231 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
2232 DMA_FROM_DEVICE);
2233 if (ret)
2234 goto e_src;
2235
2236 op.soc = 1;
2237 op.src.u.dma.address = src.dma.address;
2238 op.src.u.dma.offset = 0;
2239 op.src.u.dma.length = src.length;
2240 op.dst.u.dma.address = dst.dma.address;
2241 op.dst.u.dma.offset = 0;
2242 op.dst.u.dma.length = dst.length;
2243
2244 op.u.ecc.function = cmd->u.ecc.function;
2245
2246 ret = cmd_q->ccp->vdata->perform->ecc(&op);
2247 if (ret) {
2248 cmd->engine_error = cmd_q->cmd_error;
2249 goto e_dst;
2250 }
2251
2252 ecc->ecc_result = le16_to_cpup(
2253 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
2254 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
2255 ret = -EIO;
2256 goto e_dst;
2257 }
2258
2259 /* Save the ECC result */
2260 ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
2261 CCP_ECC_MODULUS_BYTES);
2262
2263e_dst:
2264 ccp_dm_free(&dst);
2265
2266e_src:
2267 ccp_dm_free(&src);
2268
2269 return ret;
2270}
2271
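/* Run an ECC point-math command (point add, point multiply or point
 * double on 384-bit curves).  As with the modular-math path, the inputs
 * are byte-reversed into one fixed-layout source buffer; the affine
 * points are given an implicit Z coordinate of 1, and the resulting X
 * and Y coordinates are reversed back into the caller's buffers.
 */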
2272static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2273{
2274 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2275 struct ccp_dm_workarea src, dst;
2276 struct ccp_op op;
2277 int ret;
2278 u8 *save;
2279
2280 if (!ecc->u.pm.point_1.x ||
2281 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
2282 !ecc->u.pm.point_1.y ||
2283 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
2284 return -EINVAL;
2285
2286 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
2287 if (!ecc->u.pm.point_2.x ||
2288 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
2289 !ecc->u.pm.point_2.y ||
2290 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
2291 return -EINVAL;
2292 } else {
2293 if (!ecc->u.pm.domain_a ||
2294 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
2295 return -EINVAL;
2296
2297 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
2298 if (!ecc->u.pm.scalar ||
2299 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
2300 return -EINVAL;
2301 }
2302
2303 if (!ecc->u.pm.result.x ||
2304 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
2305 !ecc->u.pm.result.y ||
2306 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
2307 return -EINVAL;
2308
2309 memset(&op, 0, sizeof(op));
2310 op.cmd_q = cmd_q;
2311 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
2312
2313	/* Concatenate the modulus and the operands. Both the modulus and
2314	 * the operands must be in little-endian format. Since the input
2315	 * is in big-endian format, it must be converted and placed in a
2316	 * fixed-length buffer.
2317	 */
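	/* The resulting layout, one CCP_ECC_OPERAND_SIZE slot per field:
	 *
	 *   [ modulus ][ P1.x ][ P1.y ][ 1 ], followed by either
	 *   [ P2.x ][ P2.y ][ 1 ]        (point add), or
	 *   [ domain a ][ scalar ]       (scalar only for point multiply)
	 */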
2318 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
2319 DMA_TO_DEVICE);
2320 if (ret)
2321 return ret;
2322
2323 /* Save the workarea address since it is updated in order to perform
2324 * the concatenation
2325 */
2326 save = src.address;
2327
2328 /* Copy the ECC modulus */
2329 ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
2330 if (ret)
2331 goto e_src;
2332 src.address += CCP_ECC_OPERAND_SIZE;
2333
2334 /* Copy the first point X and Y coordinate */
2335 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
2336 ecc->u.pm.point_1.x_len);
2337 if (ret)
2338 goto e_src;
2339 src.address += CCP_ECC_OPERAND_SIZE;
2340 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
2341 ecc->u.pm.point_1.y_len);
2342 if (ret)
2343 goto e_src;
2344 src.address += CCP_ECC_OPERAND_SIZE;
2345
2346 /* Set the first point Z coordinate to 1 */
2347 *src.address = 0x01;
2348 src.address += CCP_ECC_OPERAND_SIZE;
2349
2350 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
2351 /* Copy the second point X and Y coordinate */
2352 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
2353 ecc->u.pm.point_2.x_len);
2354 if (ret)
2355 goto e_src;
2356 src.address += CCP_ECC_OPERAND_SIZE;
2357 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
2358 ecc->u.pm.point_2.y_len);
2359 if (ret)
2360 goto e_src;
2361 src.address += CCP_ECC_OPERAND_SIZE;
2362
2363 /* Set the second point Z coordinate to 1 */
2364 *src.address = 0x01;
2365 src.address += CCP_ECC_OPERAND_SIZE;
2366 } else {
2367 /* Copy the Domain "a" parameter */
2368 ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
2369 ecc->u.pm.domain_a_len);
2370 if (ret)
2371 goto e_src;
2372 src.address += CCP_ECC_OPERAND_SIZE;
2373
2374 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
2375 /* Copy the scalar value */
2376 ret = ccp_reverse_set_dm_area(&src, 0,
2377 ecc->u.pm.scalar, 0,
2378 ecc->u.pm.scalar_len);
2379 if (ret)
2380 goto e_src;
2381 src.address += CCP_ECC_OPERAND_SIZE;
2382 }
2383 }
2384
2385 /* Restore the workarea address */
2386 src.address = save;
2387
2388 /* Prepare the output area for the operation */
2389 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
2390 DMA_FROM_DEVICE);
2391 if (ret)
2392 goto e_src;
2393
2394 op.soc = 1;
2395 op.src.u.dma.address = src.dma.address;
2396 op.src.u.dma.offset = 0;
2397 op.src.u.dma.length = src.length;
2398 op.dst.u.dma.address = dst.dma.address;
2399 op.dst.u.dma.offset = 0;
2400 op.dst.u.dma.length = dst.length;
2401
2402 op.u.ecc.function = cmd->u.ecc.function;
2403
2404 ret = cmd_q->ccp->vdata->perform->ecc(&op);
2405 if (ret) {
2406 cmd->engine_error = cmd_q->cmd_error;
2407 goto e_dst;
2408 }
2409
2410 ecc->ecc_result = le16_to_cpup(
2411 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
2412 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
2413 ret = -EIO;
2414 goto e_dst;
2415 }
2416
2417 /* Save the workarea address since it is updated as we walk through
2418 * to copy the point math result
2419 */
2420 save = dst.address;
2421
2422 /* Save the ECC result X and Y coordinates */
2423 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
2424 CCP_ECC_MODULUS_BYTES);
2425 dst.address += CCP_ECC_OUTPUT_SIZE;
2426 ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
2427 CCP_ECC_MODULUS_BYTES);
2428 dst.address += CCP_ECC_OUTPUT_SIZE;
2429
2430 /* Restore the workarea address */
2431 dst.address = save;
2432
2433e_dst:
2434 ccp_dm_free(&dst);
2435
2436e_src:
2437 ccp_dm_free(&src);
2438
2439 return ret;
2440}
2441
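/* Validate the common ECC parameters and dispatch to the modular-math
 * or point-math handler based on the requested function.
 */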
2442static noinline_for_stack int
2443ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2444{
2445 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
2446
2447 ecc->ecc_result = 0;
2448
2449 if (!ecc->mod ||
2450 (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
2451 return -EINVAL;
2452
2453 switch (ecc->function) {
2454 case CCP_ECC_FUNCTION_MMUL_384BIT:
2455 case CCP_ECC_FUNCTION_MADD_384BIT:
2456 case CCP_ECC_FUNCTION_MINV_384BIT:
2457 return ccp_run_ecc_mm_cmd(cmd_q, cmd);
2458
2459 case CCP_ECC_FUNCTION_PADD_384BIT:
2460 case CCP_ECC_FUNCTION_PMUL_384BIT:
2461 case CCP_ECC_FUNCTION_PDBL_384BIT:
2462 return ccp_run_ecc_pm_cmd(cmd_q, cmd);
2463
2464 default:
2465 return -EINVAL;
2466 }
2467}
2468
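/* Top-level command dispatcher: reset the per-command error state,
 * refresh the queue's free slot count and hand the command off to the
 * engine-specific handler.  The return value is the handler's status;
 * engine errors are reported through cmd->engine_error.
 */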
2469int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
2470{
2471 int ret;
2472
2473 cmd->engine_error = 0;
2474 cmd_q->cmd_error = 0;
2475 cmd_q->int_rcvd = 0;
2476 cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
2477
2478 switch (cmd->engine) {
2479 case CCP_ENGINE_AES:
2480 switch (cmd->u.aes.mode) {
2481 case CCP_AES_MODE_CMAC:
2482 ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
2483 break;
2484 case CCP_AES_MODE_GCM:
2485 ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
2486 break;
2487 default:
2488 ret = ccp_run_aes_cmd(cmd_q, cmd);
2489 break;
2490 }
2491 break;
2492 case CCP_ENGINE_XTS_AES_128:
2493 ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
2494 break;
2495 case CCP_ENGINE_DES3:
2496 ret = ccp_run_des3_cmd(cmd_q, cmd);
2497 break;
2498 case CCP_ENGINE_SHA:
2499 ret = ccp_run_sha_cmd(cmd_q, cmd);
2500 break;
2501 case CCP_ENGINE_RSA:
2502 ret = ccp_run_rsa_cmd(cmd_q, cmd);
2503 break;
2504 case CCP_ENGINE_PASSTHRU:
2505 if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
2506 ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
2507 else
2508 ret = ccp_run_passthru_cmd(cmd_q, cmd);
2509 break;
2510 case CCP_ENGINE_ECC:
2511 ret = ccp_run_ecc_cmd(cmd_q, cmd);
2512 break;
2513 default:
2514 ret = -EINVAL;
2515 }
2516
2517 return ret;
2518}
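
/* A minimal usage sketch for the pass-through engine (illustrative
 * only; src_sg, dst_sg, len and cmd_q are hypothetical caller-owned
 * variables, completion handling is omitted, and len must be a
 * multiple of CCP_PASSTHRU_BLOCKSIZE unless final is set):
 *
 *	struct ccp_cmd cmd;
 *	int ret;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_PASSTHRU;
 *	cmd.u.passthru.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
 *	cmd.u.passthru.src = src_sg;
 *	cmd.u.passthru.dst = dst_sg;
 *	cmd.u.passthru.src_len = len;
 *	cmd.u.passthru.final = 1;
 *
 *	ret = ccp_run_cmd(cmd_q, &cmd);
 */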