/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |       |             |  (PDB)      |
 * ---------------       |------------>|  (hashKey)  |
 *       .               |             |  (cipherKey)|
 *       .               |      |----->|  (operation)|
 * ---------------       |      |      ---------------
 * | JobDesc #2  |-------|      |
 * | *(packet 2) |              |
 * ---------------              |
 *       .                      |
 *       .                      |
 * ---------------              |
 * | JobDesc #3  |--------------|
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
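
/*
 * Illustrative sketch only (typical usage, not a driver entry point): with
 * the desc_constr.h helpers, a job descriptor of the above shape is built
 * roughly as
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * which is the pattern init_aead_job() and init_ablkcipher_job() below
 * follow.
 */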

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
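
/*
 * Worked example (assuming CAAM_CMD_SZ = 4 and CAAM_PTR_SZ = 8, i.e. 64-bit
 * DMA pointers): CAAM_DESC_BYTES_MAX = 64 * 4 = 256 bytes and
 * DESC_JOB_IO_LEN = 5 * 4 + 3 * 8 = 44 bytes, so a shared descriptor may
 * use at most 256 - 44 = 212 bytes, i.e. DESC_MAX_USED_LEN = 53 words.
 */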

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};
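
/*
 * Layout note (derived from the setkey paths below): for authenc algorithms
 * ctx->key holds the split authentication key, padded to adata.keylen_pad,
 * immediately followed by the encryption key, with the RFC3686 nonce (when
 * used) occupying the last 4 bytes of the latter:
 *
 *	ctx->key = { split auth key (padded) | enc key [| nonce] }
 *
 * This is why the cipher key is addressed as ctx->key + adata.keylen_pad.
 */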

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
				    ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
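
	/*
	 * Worked example (CTR_RFC3686_NONCE_SIZE is 4): for rfc3686 the
	 * 8-byte IV is written at byte offset 16 + 4 = 20 of CONTEXT1,
	 * leaving room for the 4-byte nonce at offset 16 and the 4-byte
	 * block counter after the IV.
	 */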

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

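	/*
	 * Bit i of inl_mask reports whether data_len[i] can still be
	 * inlined: bit 0 covers the split authentication key, bit 1 the
	 * cipher key, matching the data_len[] ordering above.
	 */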
	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
			       false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);
	return rfc4543_set_sh_desc(aead);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), ctx->dir);

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), ctx->dir);

	return 0;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
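
/*
 * aead_edesc_alloc() below allocates the above as a single blob:
 *
 *	{ struct aead_edesc | hw_desc (desc_bytes) | sec4_sg table }
 *
 * so freeing the edesc releases the job descriptor and link table too.
 */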

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @iv_dir: DMA mapping direction for IV
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     and IV
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	enum dma_data_direction iv_dir;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
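
/*
 * Likewise a single allocation (see ablkcipher_edesc_alloc()):
 *
 *	{ struct ablkcipher_edesc | hw_desc | sec4_sg table | IV }
 *
 * The IV is copied to the tail so it always sits in DMA-able memory,
 * independent of where the caller's req->info lives.
 */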

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->iv_dir,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block when running in CBC mode.
	 */
	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
					 ivsize, ivsize, 0);

	/* In case initial IV was generated, copy it in GIVCIPHER request */
	if (edesc->iv_dir == DMA_FROM_DEVICE) {
		u8 *iv;
		struct skcipher_givcrypt_request *greq;

		greq = container_of(req, struct skcipher_givcrypt_request,
				    creq);
		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
		     edesc->sec4_sg_bytes;
		memcpy(greq->giv, iv, ivsize);
	}

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
			out_options = 0;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
	unsigned int last;

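	/*
	 * IV handling note: only generic gcm(aes) takes the full 12-byte
	 * (GCM_AES_IV_SIZE) IV from the request; for rfc4106/rfc4543 the
	 * hardware is fed the 4-byte salt stored after the AES key plus
	 * the 8-byte per-request IV, appended below.
	 */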
	init_aead_job(req, edesc, all_contig, encrypt);
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	/*
	 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
	 * having DPOVRD as destination.
	 */
	if (ctrlpriv->era < 3)
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	else
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0;
	dma_addr_t dst_dma;
	int len;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	pr_err("asked=%d, nbytes=%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
#endif
	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
			  LDST_SGF);

	if (likely(req->src == req->dst)) {
		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	} else {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
#endif
	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (edesc->src_nents == 1) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
		  sizeof(struct sec4_sg_entry);
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		     req->assoclen + req->cryptlen, 1);

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(
	struct ablkcipher_request *req, int desc_bytes)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
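
	/*
	 * Input link table layout: entry 0 is the IV, entries 1 ..
	 * mapped_src_nents hold the source; when dst != src with more than
	 * one segment, its entries follow at dst_sg_idx.
	 */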

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_TO_DEVICE;

	/* Make sure IV is located in a DMAable area */
	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	memcpy(iv, req->info, ivsize);

	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block when running in CBC mode.
	 */
	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
					 ivsize, ivsize, 0);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += 1 + mapped_dst_nents;
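
	/*
	 * Link table layout here: source entries (when more than one) come
	 * first, then the IV at dst_sg_idx, then the destination entries, so
	 * the hardware writes the generated IV into the DMA-able tail buffer
	 * immediately followed by the ciphertext.
	 */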

	/*
	 * allocate space for base edesc and hw desc commands, link tables, IV
	 */
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_FROM_DEVICE;

	/* Make sure IV is located in a DMAable area */
	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (mapped_src_nents > 1)
		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
				   0);

	dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
	sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
			   dst_sg_idx + 1, 0);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	return edesc;
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
1844
1845#define template_aead template_u.aead
1846#define template_ablkcipher template_u.ablkcipher
1847struct caam_alg_template {
1848 char name[CRYPTO_MAX_ALG_NAME];
1849 char driver_name[CRYPTO_MAX_ALG_NAME];
1850 unsigned int blocksize;
1851 u32 type;
1852 union {
1853 struct ablkcipher_alg ablkcipher;
1854 } template_u;
1855 u32 class1_alg_type;
1856 u32 class2_alg_type;
1857};
1858
1859static struct caam_alg_template driver_algs[] = {
1860 /* ablkcipher descriptor */
1861 {
1862 .name = "cbc(aes)",
1863 .driver_name = "cbc-aes-caam",
1864 .blocksize = AES_BLOCK_SIZE,
1865 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1866 .template_ablkcipher = {
1867 .setkey = ablkcipher_setkey,
1868 .encrypt = ablkcipher_encrypt,
1869 .decrypt = ablkcipher_decrypt,
1870 .givencrypt = ablkcipher_givencrypt,
1871 .geniv = "<built-in>",
1872 .min_keysize = AES_MIN_KEY_SIZE,
1873 .max_keysize = AES_MAX_KEY_SIZE,
1874 .ivsize = AES_BLOCK_SIZE,
1875 },
1876 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1877 },
1878 {
1879 .name = "cbc(des3_ede)",
1880 .driver_name = "cbc-3des-caam",
1881 .blocksize = DES3_EDE_BLOCK_SIZE,
1882 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1883 .template_ablkcipher = {
1884 .setkey = ablkcipher_setkey,
1885 .encrypt = ablkcipher_encrypt,
1886 .decrypt = ablkcipher_decrypt,
1887 .givencrypt = ablkcipher_givencrypt,
1888 .geniv = "<built-in>",
1889 .min_keysize = DES3_EDE_KEY_SIZE,
1890 .max_keysize = DES3_EDE_KEY_SIZE,
1891 .ivsize = DES3_EDE_BLOCK_SIZE,
1892 },
1893 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1894 },
1895 {
1896 .name = "cbc(des)",
1897 .driver_name = "cbc-des-caam",
1898 .blocksize = DES_BLOCK_SIZE,
1899 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1900 .template_ablkcipher = {
1901 .setkey = ablkcipher_setkey,
1902 .encrypt = ablkcipher_encrypt,
1903 .decrypt = ablkcipher_decrypt,
1904 .givencrypt = ablkcipher_givencrypt,
1905 .geniv = "<built-in>",
1906 .min_keysize = DES_KEY_SIZE,
1907 .max_keysize = DES_KEY_SIZE,
1908 .ivsize = DES_BLOCK_SIZE,
1909 },
1910 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1911 },
1912 {
1913 .name = "ctr(aes)",
1914 .driver_name = "ctr-aes-caam",
1915 .blocksize = 1,
1916 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1917 .template_ablkcipher = {
1918 .setkey = ablkcipher_setkey,
1919 .encrypt = ablkcipher_encrypt,
1920 .decrypt = ablkcipher_decrypt,
1921 .geniv = "chainiv",
1922 .min_keysize = AES_MIN_KEY_SIZE,
1923 .max_keysize = AES_MAX_KEY_SIZE,
1924 .ivsize = AES_BLOCK_SIZE,
1925 },
1926 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1927 },
1928 {
1929 .name = "rfc3686(ctr(aes))",
1930 .driver_name = "rfc3686-ctr-aes-caam",
1931 .blocksize = 1,
1932 .type = CRYPTO_ALG_TYPE_GIVCIPHER,
1933 .template_ablkcipher = {
1934 .setkey = ablkcipher_setkey,
1935 .encrypt = ablkcipher_encrypt,
1936 .decrypt = ablkcipher_decrypt,
1937 .givencrypt = ablkcipher_givencrypt,
1938 .geniv = "<built-in>",
1939 .min_keysize = AES_MIN_KEY_SIZE +
1940 CTR_RFC3686_NONCE_SIZE,
1941 .max_keysize = AES_MAX_KEY_SIZE +
1942 CTR_RFC3686_NONCE_SIZE,
1943 .ivsize = CTR_RFC3686_IV_SIZE,
1944 },
1945 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
1946 },
1947 {
1948 .name = "xts(aes)",
1949 .driver_name = "xts-aes-caam",
1950 .blocksize = AES_BLOCK_SIZE,
1951 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1952 .template_ablkcipher = {
1953 .setkey = xts_ablkcipher_setkey,
1954 .encrypt = ablkcipher_encrypt,
1955 .decrypt = ablkcipher_decrypt,
1956 .geniv = "eseqiv",
1957 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1958 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1959 .ivsize = AES_BLOCK_SIZE,
1960 },
1961 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1962 },
1963};
1964
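/*
 * AEAD algorithms, registered through the aead API. The .caam member
 * carries the class 1/2 OPERATION settings plus the rfc3686 (CTR
 * nonce carried in the key) and geniv (IV generated by the
 * descriptor) variant flags.
 */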
1965static struct caam_aead_alg driver_aeads[] = {
1966 {
1967 .aead = {
1968 .base = {
1969 .cra_name = "rfc4106(gcm(aes))",
1970 .cra_driver_name = "rfc4106-gcm-aes-caam",
1971 .cra_blocksize = 1,
1972 },
1973 .setkey = rfc4106_setkey,
1974 .setauthsize = rfc4106_setauthsize,
1975 .encrypt = ipsec_gcm_encrypt,
1976 .decrypt = ipsec_gcm_decrypt,
1977 .ivsize = GCM_RFC4106_IV_SIZE,
1978 .maxauthsize = AES_BLOCK_SIZE,
1979 },
1980 .caam = {
1981 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1982 },
1983 },
1984 {
1985 .aead = {
1986 .base = {
1987 .cra_name = "rfc4543(gcm(aes))",
1988 .cra_driver_name = "rfc4543-gcm-aes-caam",
1989 .cra_blocksize = 1,
1990 },
1991 .setkey = rfc4543_setkey,
1992 .setauthsize = rfc4543_setauthsize,
1993 .encrypt = ipsec_gcm_encrypt,
1994 .decrypt = ipsec_gcm_decrypt,
1995 .ivsize = GCM_RFC4543_IV_SIZE,
1996 .maxauthsize = AES_BLOCK_SIZE,
1997 },
1998 .caam = {
1999 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2000 },
2001 },
2002 /* Galois Counter Mode */
2003 {
2004 .aead = {
2005 .base = {
2006 .cra_name = "gcm(aes)",
2007 .cra_driver_name = "gcm-aes-caam",
2008 .cra_blocksize = 1,
2009 },
2010 .setkey = gcm_setkey,
2011 .setauthsize = gcm_setauthsize,
2012 .encrypt = gcm_encrypt,
2013 .decrypt = gcm_decrypt,
2014 .ivsize = GCM_AES_IV_SIZE,
2015 .maxauthsize = AES_BLOCK_SIZE,
2016 },
2017 .caam = {
2018 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
2019 },
2020 },
2021 /* single-pass ipsec_esp descriptor */
2022 {
2023 .aead = {
2024 .base = {
2025 .cra_name = "authenc(hmac(md5),"
2026 "ecb(cipher_null))",
2027 .cra_driver_name = "authenc-hmac-md5-"
2028 "ecb-cipher_null-caam",
2029 .cra_blocksize = NULL_BLOCK_SIZE,
2030 },
2031 .setkey = aead_setkey,
2032 .setauthsize = aead_setauthsize,
2033 .encrypt = aead_encrypt,
2034 .decrypt = aead_decrypt,
2035 .ivsize = NULL_IV_SIZE,
2036 .maxauthsize = MD5_DIGEST_SIZE,
2037 },
2038 .caam = {
2039 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2040 OP_ALG_AAI_HMAC_PRECOMP,
2041 },
2042 },
2043 {
2044 .aead = {
2045 .base = {
2046 .cra_name = "authenc(hmac(sha1),"
2047 "ecb(cipher_null))",
2048 .cra_driver_name = "authenc-hmac-sha1-"
2049 "ecb-cipher_null-caam",
2050 .cra_blocksize = NULL_BLOCK_SIZE,
2051 },
2052 .setkey = aead_setkey,
2053 .setauthsize = aead_setauthsize,
2054 .encrypt = aead_encrypt,
2055 .decrypt = aead_decrypt,
2056 .ivsize = NULL_IV_SIZE,
2057 .maxauthsize = SHA1_DIGEST_SIZE,
2058 },
2059 .caam = {
2060 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2061 OP_ALG_AAI_HMAC_PRECOMP,
2062 },
2063 },
2064 {
2065 .aead = {
2066 .base = {
2067 .cra_name = "authenc(hmac(sha224),"
2068 "ecb(cipher_null))",
2069 .cra_driver_name = "authenc-hmac-sha224-"
2070 "ecb-cipher_null-caam",
2071 .cra_blocksize = NULL_BLOCK_SIZE,
2072 },
2073 .setkey = aead_setkey,
2074 .setauthsize = aead_setauthsize,
2075 .encrypt = aead_encrypt,
2076 .decrypt = aead_decrypt,
2077 .ivsize = NULL_IV_SIZE,
2078 .maxauthsize = SHA224_DIGEST_SIZE,
2079 },
2080 .caam = {
2081 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2082 OP_ALG_AAI_HMAC_PRECOMP,
2083 },
2084 },
2085 {
2086 .aead = {
2087 .base = {
2088 .cra_name = "authenc(hmac(sha256),"
2089 "ecb(cipher_null))",
2090 .cra_driver_name = "authenc-hmac-sha256-"
2091 "ecb-cipher_null-caam",
2092 .cra_blocksize = NULL_BLOCK_SIZE,
2093 },
2094 .setkey = aead_setkey,
2095 .setauthsize = aead_setauthsize,
2096 .encrypt = aead_encrypt,
2097 .decrypt = aead_decrypt,
2098 .ivsize = NULL_IV_SIZE,
2099 .maxauthsize = SHA256_DIGEST_SIZE,
2100 },
2101 .caam = {
2102 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2103 OP_ALG_AAI_HMAC_PRECOMP,
2104 },
2105 },
2106 {
2107 .aead = {
2108 .base = {
2109 .cra_name = "authenc(hmac(sha384),"
2110 "ecb(cipher_null))",
2111 .cra_driver_name = "authenc-hmac-sha384-"
2112 "ecb-cipher_null-caam",
2113 .cra_blocksize = NULL_BLOCK_SIZE,
2114 },
2115 .setkey = aead_setkey,
2116 .setauthsize = aead_setauthsize,
2117 .encrypt = aead_encrypt,
2118 .decrypt = aead_decrypt,
2119 .ivsize = NULL_IV_SIZE,
2120 .maxauthsize = SHA384_DIGEST_SIZE,
2121 },
2122 .caam = {
2123 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2124 OP_ALG_AAI_HMAC_PRECOMP,
2125 },
2126 },
2127 {
2128 .aead = {
2129 .base = {
2130 .cra_name = "authenc(hmac(sha512),"
2131 "ecb(cipher_null))",
2132 .cra_driver_name = "authenc-hmac-sha512-"
2133 "ecb-cipher_null-caam",
2134 .cra_blocksize = NULL_BLOCK_SIZE,
2135 },
2136 .setkey = aead_setkey,
2137 .setauthsize = aead_setauthsize,
2138 .encrypt = aead_encrypt,
2139 .decrypt = aead_decrypt,
2140 .ivsize = NULL_IV_SIZE,
2141 .maxauthsize = SHA512_DIGEST_SIZE,
2142 },
2143 .caam = {
2144 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2145 OP_ALG_AAI_HMAC_PRECOMP,
2146 },
2147 },
2148 {
2149 .aead = {
2150 .base = {
2151 .cra_name = "authenc(hmac(md5),cbc(aes))",
2152 .cra_driver_name = "authenc-hmac-md5-"
2153 "cbc-aes-caam",
2154 .cra_blocksize = AES_BLOCK_SIZE,
2155 },
2156 .setkey = aead_setkey,
2157 .setauthsize = aead_setauthsize,
2158 .encrypt = aead_encrypt,
2159 .decrypt = aead_decrypt,
2160 .ivsize = AES_BLOCK_SIZE,
2161 .maxauthsize = MD5_DIGEST_SIZE,
2162 },
2163 .caam = {
2164 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2165 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2166 OP_ALG_AAI_HMAC_PRECOMP,
2167 },
2168 },
2169 {
2170 .aead = {
2171 .base = {
2172 .cra_name = "echainiv(authenc(hmac(md5),"
2173 "cbc(aes)))",
2174 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2175 "cbc-aes-caam",
2176 .cra_blocksize = AES_BLOCK_SIZE,
2177 },
2178 .setkey = aead_setkey,
2179 .setauthsize = aead_setauthsize,
2180 .encrypt = aead_encrypt,
2181 .decrypt = aead_decrypt,
2182 .ivsize = AES_BLOCK_SIZE,
2183 .maxauthsize = MD5_DIGEST_SIZE,
2184 },
2185 .caam = {
2186 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2187 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2188 OP_ALG_AAI_HMAC_PRECOMP,
2189 .geniv = true,
2190 },
2191 },
2192 {
2193 .aead = {
2194 .base = {
2195 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2196 .cra_driver_name = "authenc-hmac-sha1-"
2197 "cbc-aes-caam",
2198 .cra_blocksize = AES_BLOCK_SIZE,
2199 },
2200 .setkey = aead_setkey,
2201 .setauthsize = aead_setauthsize,
2202 .encrypt = aead_encrypt,
2203 .decrypt = aead_decrypt,
2204 .ivsize = AES_BLOCK_SIZE,
2205 .maxauthsize = SHA1_DIGEST_SIZE,
2206 },
2207 .caam = {
2208 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2209 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2210 OP_ALG_AAI_HMAC_PRECOMP,
2211 },
2212 },
2213 {
2214 .aead = {
2215 .base = {
2216 .cra_name = "echainiv(authenc(hmac(sha1),"
2217 "cbc(aes)))",
2218 .cra_driver_name = "echainiv-authenc-"
2219 "hmac-sha1-cbc-aes-caam",
2220 .cra_blocksize = AES_BLOCK_SIZE,
2221 },
2222 .setkey = aead_setkey,
2223 .setauthsize = aead_setauthsize,
2224 .encrypt = aead_encrypt,
2225 .decrypt = aead_decrypt,
2226 .ivsize = AES_BLOCK_SIZE,
2227 .maxauthsize = SHA1_DIGEST_SIZE,
2228 },
2229 .caam = {
2230 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2231 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2232 OP_ALG_AAI_HMAC_PRECOMP,
2233 .geniv = true,
2234 },
2235 },
2236 {
2237 .aead = {
2238 .base = {
2239 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2240 .cra_driver_name = "authenc-hmac-sha224-"
2241 "cbc-aes-caam",
2242 .cra_blocksize = AES_BLOCK_SIZE,
2243 },
2244 .setkey = aead_setkey,
2245 .setauthsize = aead_setauthsize,
2246 .encrypt = aead_encrypt,
2247 .decrypt = aead_decrypt,
2248 .ivsize = AES_BLOCK_SIZE,
2249 .maxauthsize = SHA224_DIGEST_SIZE,
2250 },
2251 .caam = {
2252 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2253 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2254 OP_ALG_AAI_HMAC_PRECOMP,
2255 },
2256 },
2257 {
2258 .aead = {
2259 .base = {
2260 .cra_name = "echainiv(authenc(hmac(sha224),"
2261 "cbc(aes)))",
2262 .cra_driver_name = "echainiv-authenc-"
2263 "hmac-sha224-cbc-aes-caam",
2264 .cra_blocksize = AES_BLOCK_SIZE,
2265 },
2266 .setkey = aead_setkey,
2267 .setauthsize = aead_setauthsize,
2268 .encrypt = aead_encrypt,
2269 .decrypt = aead_decrypt,
2270 .ivsize = AES_BLOCK_SIZE,
2271 .maxauthsize = SHA224_DIGEST_SIZE,
2272 },
2273 .caam = {
2274 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2275 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2276 OP_ALG_AAI_HMAC_PRECOMP,
2277 .geniv = true,
2278 },
2279 },
2280 {
2281 .aead = {
2282 .base = {
2283 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2284 .cra_driver_name = "authenc-hmac-sha256-"
2285 "cbc-aes-caam",
2286 .cra_blocksize = AES_BLOCK_SIZE,
2287 },
2288 .setkey = aead_setkey,
2289 .setauthsize = aead_setauthsize,
2290 .encrypt = aead_encrypt,
2291 .decrypt = aead_decrypt,
2292 .ivsize = AES_BLOCK_SIZE,
2293 .maxauthsize = SHA256_DIGEST_SIZE,
2294 },
2295 .caam = {
2296 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2297 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2298 OP_ALG_AAI_HMAC_PRECOMP,
2299 },
2300 },
2301 {
2302 .aead = {
2303 .base = {
2304 .cra_name = "echainiv(authenc(hmac(sha256),"
2305 "cbc(aes)))",
2306 .cra_driver_name = "echainiv-authenc-"
2307 "hmac-sha256-cbc-aes-caam",
2308 .cra_blocksize = AES_BLOCK_SIZE,
2309 },
2310 .setkey = aead_setkey,
2311 .setauthsize = aead_setauthsize,
2312 .encrypt = aead_encrypt,
2313 .decrypt = aead_decrypt,
2314 .ivsize = AES_BLOCK_SIZE,
2315 .maxauthsize = SHA256_DIGEST_SIZE,
2316 },
2317 .caam = {
2318 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2319 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2320 OP_ALG_AAI_HMAC_PRECOMP,
2321 .geniv = true,
2322 },
2323 },
2324 {
2325 .aead = {
2326 .base = {
2327 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2328 .cra_driver_name = "authenc-hmac-sha384-"
2329 "cbc-aes-caam",
2330 .cra_blocksize = AES_BLOCK_SIZE,
2331 },
2332 .setkey = aead_setkey,
2333 .setauthsize = aead_setauthsize,
2334 .encrypt = aead_encrypt,
2335 .decrypt = aead_decrypt,
2336 .ivsize = AES_BLOCK_SIZE,
2337 .maxauthsize = SHA384_DIGEST_SIZE,
2338 },
2339 .caam = {
2340 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2341 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2342 OP_ALG_AAI_HMAC_PRECOMP,
2343 },
2344 },
2345 {
2346 .aead = {
2347 .base = {
2348 .cra_name = "echainiv(authenc(hmac(sha384),"
2349 "cbc(aes)))",
2350 .cra_driver_name = "echainiv-authenc-"
2351 "hmac-sha384-cbc-aes-caam",
2352 .cra_blocksize = AES_BLOCK_SIZE,
2353 },
2354 .setkey = aead_setkey,
2355 .setauthsize = aead_setauthsize,
2356 .encrypt = aead_encrypt,
2357 .decrypt = aead_decrypt,
2358 .ivsize = AES_BLOCK_SIZE,
2359 .maxauthsize = SHA384_DIGEST_SIZE,
2360 },
2361 .caam = {
2362 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2363 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2364 OP_ALG_AAI_HMAC_PRECOMP,
2365 .geniv = true,
2366 },
2367 },
2368 {
2369 .aead = {
2370 .base = {
2371 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2372 .cra_driver_name = "authenc-hmac-sha512-"
2373 "cbc-aes-caam",
2374 .cra_blocksize = AES_BLOCK_SIZE,
2375 },
2376 .setkey = aead_setkey,
2377 .setauthsize = aead_setauthsize,
2378 .encrypt = aead_encrypt,
2379 .decrypt = aead_decrypt,
2380 .ivsize = AES_BLOCK_SIZE,
2381 .maxauthsize = SHA512_DIGEST_SIZE,
2382 },
2383 .caam = {
2384 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2385 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2386 OP_ALG_AAI_HMAC_PRECOMP,
2387 },
2388 },
2389 {
2390 .aead = {
2391 .base = {
2392 .cra_name = "echainiv(authenc(hmac(sha512),"
2393 "cbc(aes)))",
2394 .cra_driver_name = "echainiv-authenc-"
2395 "hmac-sha512-cbc-aes-caam",
2396 .cra_blocksize = AES_BLOCK_SIZE,
2397 },
2398 .setkey = aead_setkey,
2399 .setauthsize = aead_setauthsize,
2400 .encrypt = aead_encrypt,
2401 .decrypt = aead_decrypt,
2402 .ivsize = AES_BLOCK_SIZE,
2403 .maxauthsize = SHA512_DIGEST_SIZE,
2404 },
2405 .caam = {
2406 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2407 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2408 OP_ALG_AAI_HMAC_PRECOMP,
2409 .geniv = true,
2410 },
2411 },
2412 {
2413 .aead = {
2414 .base = {
2415 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2416 .cra_driver_name = "authenc-hmac-md5-"
2417 "cbc-des3_ede-caam",
2418 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2419 },
2420 .setkey = aead_setkey,
2421 .setauthsize = aead_setauthsize,
2422 .encrypt = aead_encrypt,
2423 .decrypt = aead_decrypt,
2424 .ivsize = DES3_EDE_BLOCK_SIZE,
2425 .maxauthsize = MD5_DIGEST_SIZE,
2426 },
2427 .caam = {
2428 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2429 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2430 OP_ALG_AAI_HMAC_PRECOMP,
2431 }
2432 },
2433 {
2434 .aead = {
2435 .base = {
2436 .cra_name = "echainiv(authenc(hmac(md5),"
2437 "cbc(des3_ede)))",
2438 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2439 "cbc-des3_ede-caam",
2440 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2441 },
2442 .setkey = aead_setkey,
2443 .setauthsize = aead_setauthsize,
2444 .encrypt = aead_encrypt,
2445 .decrypt = aead_decrypt,
2446 .ivsize = DES3_EDE_BLOCK_SIZE,
2447 .maxauthsize = MD5_DIGEST_SIZE,
2448 },
2449 .caam = {
2450 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2451 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2452 OP_ALG_AAI_HMAC_PRECOMP,
2453 .geniv = true,
2454 }
2455 },
2456 {
2457 .aead = {
2458 .base = {
2459 .cra_name = "authenc(hmac(sha1),"
2460 "cbc(des3_ede))",
2461 .cra_driver_name = "authenc-hmac-sha1-"
2462 "cbc-des3_ede-caam",
2463 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2464 },
2465 .setkey = aead_setkey,
2466 .setauthsize = aead_setauthsize,
2467 .encrypt = aead_encrypt,
2468 .decrypt = aead_decrypt,
2469 .ivsize = DES3_EDE_BLOCK_SIZE,
2470 .maxauthsize = SHA1_DIGEST_SIZE,
2471 },
2472 .caam = {
2473 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2474 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2475 OP_ALG_AAI_HMAC_PRECOMP,
2476 },
2477 },
2478 {
2479 .aead = {
2480 .base = {
2481 .cra_name = "echainiv(authenc(hmac(sha1),"
2482 "cbc(des3_ede)))",
2483 .cra_driver_name = "echainiv-authenc-"
2484 "hmac-sha1-"
2485 "cbc-des3_ede-caam",
2486 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2487 },
2488 .setkey = aead_setkey,
2489 .setauthsize = aead_setauthsize,
2490 .encrypt = aead_encrypt,
2491 .decrypt = aead_decrypt,
2492 .ivsize = DES3_EDE_BLOCK_SIZE,
2493 .maxauthsize = SHA1_DIGEST_SIZE,
2494 },
2495 .caam = {
2496 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2497 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2498 OP_ALG_AAI_HMAC_PRECOMP,
2499 .geniv = true,
2500 },
2501 },
2502 {
2503 .aead = {
2504 .base = {
2505 .cra_name = "authenc(hmac(sha224),"
2506 "cbc(des3_ede))",
2507 .cra_driver_name = "authenc-hmac-sha224-"
2508 "cbc-des3_ede-caam",
2509 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2510 },
2511 .setkey = aead_setkey,
2512 .setauthsize = aead_setauthsize,
2513 .encrypt = aead_encrypt,
2514 .decrypt = aead_decrypt,
2515 .ivsize = DES3_EDE_BLOCK_SIZE,
2516 .maxauthsize = SHA224_DIGEST_SIZE,
2517 },
2518 .caam = {
2519 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2520 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2521 OP_ALG_AAI_HMAC_PRECOMP,
2522 },
2523 },
2524 {
2525 .aead = {
2526 .base = {
2527 .cra_name = "echainiv(authenc(hmac(sha224),"
2528 "cbc(des3_ede)))",
2529 .cra_driver_name = "echainiv-authenc-"
2530 "hmac-sha224-"
2531 "cbc-des3_ede-caam",
2532 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2533 },
2534 .setkey = aead_setkey,
2535 .setauthsize = aead_setauthsize,
2536 .encrypt = aead_encrypt,
2537 .decrypt = aead_decrypt,
2538 .ivsize = DES3_EDE_BLOCK_SIZE,
2539 .maxauthsize = SHA224_DIGEST_SIZE,
2540 },
2541 .caam = {
2542 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2543 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2544 OP_ALG_AAI_HMAC_PRECOMP,
2545 .geniv = true,
2546 },
2547 },
2548 {
2549 .aead = {
2550 .base = {
2551 .cra_name = "authenc(hmac(sha256),"
2552 "cbc(des3_ede))",
2553 .cra_driver_name = "authenc-hmac-sha256-"
2554 "cbc-des3_ede-caam",
2555 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2556 },
2557 .setkey = aead_setkey,
2558 .setauthsize = aead_setauthsize,
2559 .encrypt = aead_encrypt,
2560 .decrypt = aead_decrypt,
2561 .ivsize = DES3_EDE_BLOCK_SIZE,
2562 .maxauthsize = SHA256_DIGEST_SIZE,
2563 },
2564 .caam = {
2565 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2566 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2567 OP_ALG_AAI_HMAC_PRECOMP,
2568 },
2569 },
2570 {
2571 .aead = {
2572 .base = {
2573 .cra_name = "echainiv(authenc(hmac(sha256),"
2574 "cbc(des3_ede)))",
2575 .cra_driver_name = "echainiv-authenc-"
2576 "hmac-sha256-"
2577 "cbc-des3_ede-caam",
2578 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2579 },
2580 .setkey = aead_setkey,
2581 .setauthsize = aead_setauthsize,
2582 .encrypt = aead_encrypt,
2583 .decrypt = aead_decrypt,
2584 .ivsize = DES3_EDE_BLOCK_SIZE,
2585 .maxauthsize = SHA256_DIGEST_SIZE,
2586 },
2587 .caam = {
2588 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2589 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2590 OP_ALG_AAI_HMAC_PRECOMP,
2591 .geniv = true,
2592 },
2593 },
2594 {
2595 .aead = {
2596 .base = {
2597 .cra_name = "authenc(hmac(sha384),"
2598 "cbc(des3_ede))",
2599 .cra_driver_name = "authenc-hmac-sha384-"
2600 "cbc-des3_ede-caam",
2601 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2602 },
2603 .setkey = aead_setkey,
2604 .setauthsize = aead_setauthsize,
2605 .encrypt = aead_encrypt,
2606 .decrypt = aead_decrypt,
2607 .ivsize = DES3_EDE_BLOCK_SIZE,
2608 .maxauthsize = SHA384_DIGEST_SIZE,
2609 },
2610 .caam = {
2611 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2612 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2613 OP_ALG_AAI_HMAC_PRECOMP,
2614 },
2615 },
2616 {
2617 .aead = {
2618 .base = {
2619 .cra_name = "echainiv(authenc(hmac(sha384),"
2620 "cbc(des3_ede)))",
2621 .cra_driver_name = "echainiv-authenc-"
2622 "hmac-sha384-"
2623 "cbc-des3_ede-caam",
2624 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2625 },
2626 .setkey = aead_setkey,
2627 .setauthsize = aead_setauthsize,
2628 .encrypt = aead_encrypt,
2629 .decrypt = aead_decrypt,
2630 .ivsize = DES3_EDE_BLOCK_SIZE,
2631 .maxauthsize = SHA384_DIGEST_SIZE,
2632 },
2633 .caam = {
2634 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2635 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2636 OP_ALG_AAI_HMAC_PRECOMP,
2637 .geniv = true,
2638 },
2639 },
2640 {
2641 .aead = {
2642 .base = {
2643 .cra_name = "authenc(hmac(sha512),"
2644 "cbc(des3_ede))",
2645 .cra_driver_name = "authenc-hmac-sha512-"
2646 "cbc-des3_ede-caam",
2647 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2648 },
2649 .setkey = aead_setkey,
2650 .setauthsize = aead_setauthsize,
2651 .encrypt = aead_encrypt,
2652 .decrypt = aead_decrypt,
2653 .ivsize = DES3_EDE_BLOCK_SIZE,
2654 .maxauthsize = SHA512_DIGEST_SIZE,
2655 },
2656 .caam = {
2657 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2658 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2659 OP_ALG_AAI_HMAC_PRECOMP,
2660 },
2661 },
2662 {
2663 .aead = {
2664 .base = {
2665 .cra_name = "echainiv(authenc(hmac(sha512),"
2666 "cbc(des3_ede)))",
2667 .cra_driver_name = "echainiv-authenc-"
2668 "hmac-sha512-"
2669 "cbc-des3_ede-caam",
2670 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2671 },
2672 .setkey = aead_setkey,
2673 .setauthsize = aead_setauthsize,
2674 .encrypt = aead_encrypt,
2675 .decrypt = aead_decrypt,
2676 .ivsize = DES3_EDE_BLOCK_SIZE,
2677 .maxauthsize = SHA512_DIGEST_SIZE,
2678 },
2679 .caam = {
2680 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2681 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2682 OP_ALG_AAI_HMAC_PRECOMP,
2683 .geniv = true,
2684 },
2685 },
2686 {
2687 .aead = {
2688 .base = {
2689 .cra_name = "authenc(hmac(md5),cbc(des))",
2690 .cra_driver_name = "authenc-hmac-md5-"
2691 "cbc-des-caam",
2692 .cra_blocksize = DES_BLOCK_SIZE,
2693 },
2694 .setkey = aead_setkey,
2695 .setauthsize = aead_setauthsize,
2696 .encrypt = aead_encrypt,
2697 .decrypt = aead_decrypt,
2698 .ivsize = DES_BLOCK_SIZE,
2699 .maxauthsize = MD5_DIGEST_SIZE,
2700 },
2701 .caam = {
2702 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2703 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2704 OP_ALG_AAI_HMAC_PRECOMP,
2705 },
2706 },
2707 {
2708 .aead = {
2709 .base = {
2710 .cra_name = "echainiv(authenc(hmac(md5),"
2711 "cbc(des)))",
2712 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2713 "cbc-des-caam",
2714 .cra_blocksize = DES_BLOCK_SIZE,
2715 },
2716 .setkey = aead_setkey,
2717 .setauthsize = aead_setauthsize,
2718 .encrypt = aead_encrypt,
2719 .decrypt = aead_decrypt,
2720 .ivsize = DES_BLOCK_SIZE,
2721 .maxauthsize = MD5_DIGEST_SIZE,
2722 },
2723 .caam = {
2724 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2725 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2726 OP_ALG_AAI_HMAC_PRECOMP,
2727 .geniv = true,
2728 },
2729 },
2730 {
2731 .aead = {
2732 .base = {
2733 .cra_name = "authenc(hmac(sha1),cbc(des))",
2734 .cra_driver_name = "authenc-hmac-sha1-"
2735 "cbc-des-caam",
2736 .cra_blocksize = DES_BLOCK_SIZE,
2737 },
2738 .setkey = aead_setkey,
2739 .setauthsize = aead_setauthsize,
2740 .encrypt = aead_encrypt,
2741 .decrypt = aead_decrypt,
2742 .ivsize = DES_BLOCK_SIZE,
2743 .maxauthsize = SHA1_DIGEST_SIZE,
2744 },
2745 .caam = {
2746 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2747 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2748 OP_ALG_AAI_HMAC_PRECOMP,
2749 },
2750 },
2751 {
2752 .aead = {
2753 .base = {
2754 .cra_name = "echainiv(authenc(hmac(sha1),"
2755 "cbc(des)))",
2756 .cra_driver_name = "echainiv-authenc-"
2757 "hmac-sha1-cbc-des-caam",
2758 .cra_blocksize = DES_BLOCK_SIZE,
2759 },
2760 .setkey = aead_setkey,
2761 .setauthsize = aead_setauthsize,
2762 .encrypt = aead_encrypt,
2763 .decrypt = aead_decrypt,
2764 .ivsize = DES_BLOCK_SIZE,
2765 .maxauthsize = SHA1_DIGEST_SIZE,
2766 },
2767 .caam = {
2768 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2769 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2770 OP_ALG_AAI_HMAC_PRECOMP,
2771 .geniv = true,
2772 },
2773 },
2774 {
2775 .aead = {
2776 .base = {
2777 .cra_name = "authenc(hmac(sha224),cbc(des))",
2778 .cra_driver_name = "authenc-hmac-sha224-"
2779 "cbc-des-caam",
2780 .cra_blocksize = DES_BLOCK_SIZE,
2781 },
2782 .setkey = aead_setkey,
2783 .setauthsize = aead_setauthsize,
2784 .encrypt = aead_encrypt,
2785 .decrypt = aead_decrypt,
2786 .ivsize = DES_BLOCK_SIZE,
2787 .maxauthsize = SHA224_DIGEST_SIZE,
2788 },
2789 .caam = {
2790 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2791 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2792 OP_ALG_AAI_HMAC_PRECOMP,
2793 },
2794 },
2795 {
2796 .aead = {
2797 .base = {
2798 .cra_name = "echainiv(authenc(hmac(sha224),"
2799 "cbc(des)))",
2800 .cra_driver_name = "echainiv-authenc-"
2801 "hmac-sha224-cbc-des-caam",
2802 .cra_blocksize = DES_BLOCK_SIZE,
2803 },
2804 .setkey = aead_setkey,
2805 .setauthsize = aead_setauthsize,
2806 .encrypt = aead_encrypt,
2807 .decrypt = aead_decrypt,
2808 .ivsize = DES_BLOCK_SIZE,
2809 .maxauthsize = SHA224_DIGEST_SIZE,
2810 },
2811 .caam = {
2812 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2813 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2814 OP_ALG_AAI_HMAC_PRECOMP,
2815 .geniv = true,
2816 },
2817 },
2818 {
2819 .aead = {
2820 .base = {
2821 .cra_name = "authenc(hmac(sha256),cbc(des))",
2822 .cra_driver_name = "authenc-hmac-sha256-"
2823 "cbc-des-caam",
2824 .cra_blocksize = DES_BLOCK_SIZE,
2825 },
2826 .setkey = aead_setkey,
2827 .setauthsize = aead_setauthsize,
2828 .encrypt = aead_encrypt,
2829 .decrypt = aead_decrypt,
2830 .ivsize = DES_BLOCK_SIZE,
2831 .maxauthsize = SHA256_DIGEST_SIZE,
2832 },
2833 .caam = {
2834 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2835 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2836 OP_ALG_AAI_HMAC_PRECOMP,
2837 },
2838 },
2839 {
2840 .aead = {
2841 .base = {
2842 .cra_name = "echainiv(authenc(hmac(sha256),"
2843 "cbc(des)))",
2844 .cra_driver_name = "echainiv-authenc-"
2845 "hmac-sha256-cbc-des-caam",
2846 .cra_blocksize = DES_BLOCK_SIZE,
2847 },
2848 .setkey = aead_setkey,
2849 .setauthsize = aead_setauthsize,
2850 .encrypt = aead_encrypt,
2851 .decrypt = aead_decrypt,
2852 .ivsize = DES_BLOCK_SIZE,
2853 .maxauthsize = SHA256_DIGEST_SIZE,
2854 },
2855 .caam = {
2856 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2857 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2858 OP_ALG_AAI_HMAC_PRECOMP,
2859 .geniv = true,
2860 },
2861 },
2862 {
2863 .aead = {
2864 .base = {
2865 .cra_name = "authenc(hmac(sha384),cbc(des))",
2866 .cra_driver_name = "authenc-hmac-sha384-"
2867 "cbc-des-caam",
2868 .cra_blocksize = DES_BLOCK_SIZE,
2869 },
2870 .setkey = aead_setkey,
2871 .setauthsize = aead_setauthsize,
2872 .encrypt = aead_encrypt,
2873 .decrypt = aead_decrypt,
2874 .ivsize = DES_BLOCK_SIZE,
2875 .maxauthsize = SHA384_DIGEST_SIZE,
2876 },
2877 .caam = {
2878 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2879 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2880 OP_ALG_AAI_HMAC_PRECOMP,
2881 },
2882 },
2883 {
2884 .aead = {
2885 .base = {
2886 .cra_name = "echainiv(authenc(hmac(sha384),"
2887 "cbc(des)))",
2888 .cra_driver_name = "echainiv-authenc-"
2889 "hmac-sha384-cbc-des-caam",
2890 .cra_blocksize = DES_BLOCK_SIZE,
2891 },
2892 .setkey = aead_setkey,
2893 .setauthsize = aead_setauthsize,
2894 .encrypt = aead_encrypt,
2895 .decrypt = aead_decrypt,
2896 .ivsize = DES_BLOCK_SIZE,
2897 .maxauthsize = SHA384_DIGEST_SIZE,
2898 },
2899 .caam = {
2900 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2901 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2902 OP_ALG_AAI_HMAC_PRECOMP,
2903 .geniv = true,
2904 },
2905 },
2906 {
2907 .aead = {
2908 .base = {
2909 .cra_name = "authenc(hmac(sha512),cbc(des))",
2910 .cra_driver_name = "authenc-hmac-sha512-"
2911 "cbc-des-caam",
2912 .cra_blocksize = DES_BLOCK_SIZE,
2913 },
2914 .setkey = aead_setkey,
2915 .setauthsize = aead_setauthsize,
2916 .encrypt = aead_encrypt,
2917 .decrypt = aead_decrypt,
2918 .ivsize = DES_BLOCK_SIZE,
2919 .maxauthsize = SHA512_DIGEST_SIZE,
2920 },
2921 .caam = {
2922 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2923 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2924 OP_ALG_AAI_HMAC_PRECOMP,
2925 },
2926 },
2927 {
2928 .aead = {
2929 .base = {
2930 .cra_name = "echainiv(authenc(hmac(sha512),"
2931 "cbc(des)))",
2932 .cra_driver_name = "echainiv-authenc-"
2933 "hmac-sha512-cbc-des-caam",
2934 .cra_blocksize = DES_BLOCK_SIZE,
2935 },
2936 .setkey = aead_setkey,
2937 .setauthsize = aead_setauthsize,
2938 .encrypt = aead_encrypt,
2939 .decrypt = aead_decrypt,
2940 .ivsize = DES_BLOCK_SIZE,
2941 .maxauthsize = SHA512_DIGEST_SIZE,
2942 },
2943 .caam = {
2944 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2945 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2946 OP_ALG_AAI_HMAC_PRECOMP,
2947 .geniv = true,
2948 },
2949 },
2950 {
2951 .aead = {
2952 .base = {
2953 .cra_name = "authenc(hmac(md5),"
2954 "rfc3686(ctr(aes)))",
2955 .cra_driver_name = "authenc-hmac-md5-"
2956 "rfc3686-ctr-aes-caam",
2957 .cra_blocksize = 1,
2958 },
2959 .setkey = aead_setkey,
2960 .setauthsize = aead_setauthsize,
2961 .encrypt = aead_encrypt,
2962 .decrypt = aead_decrypt,
2963 .ivsize = CTR_RFC3686_IV_SIZE,
2964 .maxauthsize = MD5_DIGEST_SIZE,
2965 },
2966 .caam = {
2967 .class1_alg_type = OP_ALG_ALGSEL_AES |
2968 OP_ALG_AAI_CTR_MOD128,
2969 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2970 OP_ALG_AAI_HMAC_PRECOMP,
2971 .rfc3686 = true,
2972 },
2973 },
2974 {
2975 .aead = {
2976 .base = {
2977 .cra_name = "seqiv(authenc("
2978 "hmac(md5),rfc3686(ctr(aes))))",
2979 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2980 "rfc3686-ctr-aes-caam",
2981 .cra_blocksize = 1,
2982 },
2983 .setkey = aead_setkey,
2984 .setauthsize = aead_setauthsize,
2985 .encrypt = aead_encrypt,
2986 .decrypt = aead_decrypt,
2987 .ivsize = CTR_RFC3686_IV_SIZE,
2988 .maxauthsize = MD5_DIGEST_SIZE,
2989 },
2990 .caam = {
2991 .class1_alg_type = OP_ALG_ALGSEL_AES |
2992 OP_ALG_AAI_CTR_MOD128,
2993 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2994 OP_ALG_AAI_HMAC_PRECOMP,
2995 .rfc3686 = true,
2996 .geniv = true,
2997 },
2998 },
2999 {
3000 .aead = {
3001 .base = {
3002 .cra_name = "authenc(hmac(sha1),"
3003 "rfc3686(ctr(aes)))",
3004 .cra_driver_name = "authenc-hmac-sha1-"
3005 "rfc3686-ctr-aes-caam",
3006 .cra_blocksize = 1,
3007 },
3008 .setkey = aead_setkey,
3009 .setauthsize = aead_setauthsize,
3010 .encrypt = aead_encrypt,
3011 .decrypt = aead_decrypt,
3012 .ivsize = CTR_RFC3686_IV_SIZE,
3013 .maxauthsize = SHA1_DIGEST_SIZE,
3014 },
3015 .caam = {
3016 .class1_alg_type = OP_ALG_ALGSEL_AES |
3017 OP_ALG_AAI_CTR_MOD128,
3018 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3019 OP_ALG_AAI_HMAC_PRECOMP,
3020 .rfc3686 = true,
3021 },
3022 },
3023 {
3024 .aead = {
3025 .base = {
3026 .cra_name = "seqiv(authenc("
3027 "hmac(sha1),rfc3686(ctr(aes))))",
3028 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
3029 "rfc3686-ctr-aes-caam",
3030 .cra_blocksize = 1,
3031 },
3032 .setkey = aead_setkey,
3033 .setauthsize = aead_setauthsize,
3034 .encrypt = aead_encrypt,
3035 .decrypt = aead_decrypt,
3036 .ivsize = CTR_RFC3686_IV_SIZE,
3037 .maxauthsize = SHA1_DIGEST_SIZE,
3038 },
3039 .caam = {
3040 .class1_alg_type = OP_ALG_ALGSEL_AES |
3041 OP_ALG_AAI_CTR_MOD128,
3042 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
3043 OP_ALG_AAI_HMAC_PRECOMP,
3044 .rfc3686 = true,
3045 .geniv = true,
3046 },
3047 },
3048 {
3049 .aead = {
3050 .base = {
3051 .cra_name = "authenc(hmac(sha224),"
3052 "rfc3686(ctr(aes)))",
3053 .cra_driver_name = "authenc-hmac-sha224-"
3054 "rfc3686-ctr-aes-caam",
3055 .cra_blocksize = 1,
3056 },
3057 .setkey = aead_setkey,
3058 .setauthsize = aead_setauthsize,
3059 .encrypt = aead_encrypt,
3060 .decrypt = aead_decrypt,
3061 .ivsize = CTR_RFC3686_IV_SIZE,
3062 .maxauthsize = SHA224_DIGEST_SIZE,
3063 },
3064 .caam = {
3065 .class1_alg_type = OP_ALG_ALGSEL_AES |
3066 OP_ALG_AAI_CTR_MOD128,
3067 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3068 OP_ALG_AAI_HMAC_PRECOMP,
3069 .rfc3686 = true,
3070 },
3071 },
3072 {
3073 .aead = {
3074 .base = {
3075 .cra_name = "seqiv(authenc("
3076 "hmac(sha224),rfc3686(ctr(aes))))",
3077 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
3078 "rfc3686-ctr-aes-caam",
3079 .cra_blocksize = 1,
3080 },
3081 .setkey = aead_setkey,
3082 .setauthsize = aead_setauthsize,
3083 .encrypt = aead_encrypt,
3084 .decrypt = aead_decrypt,
3085 .ivsize = CTR_RFC3686_IV_SIZE,
3086 .maxauthsize = SHA224_DIGEST_SIZE,
3087 },
3088 .caam = {
3089 .class1_alg_type = OP_ALG_ALGSEL_AES |
3090 OP_ALG_AAI_CTR_MOD128,
3091 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
3092 OP_ALG_AAI_HMAC_PRECOMP,
3093 .rfc3686 = true,
3094 .geniv = true,
3095 },
3096 },
3097 {
3098 .aead = {
3099 .base = {
3100 .cra_name = "authenc(hmac(sha256),"
3101 "rfc3686(ctr(aes)))",
3102 .cra_driver_name = "authenc-hmac-sha256-"
3103 "rfc3686-ctr-aes-caam",
3104 .cra_blocksize = 1,
3105 },
3106 .setkey = aead_setkey,
3107 .setauthsize = aead_setauthsize,
3108 .encrypt = aead_encrypt,
3109 .decrypt = aead_decrypt,
3110 .ivsize = CTR_RFC3686_IV_SIZE,
3111 .maxauthsize = SHA256_DIGEST_SIZE,
3112 },
3113 .caam = {
3114 .class1_alg_type = OP_ALG_ALGSEL_AES |
3115 OP_ALG_AAI_CTR_MOD128,
3116 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3117 OP_ALG_AAI_HMAC_PRECOMP,
3118 .rfc3686 = true,
3119 },
3120 },
3121 {
3122 .aead = {
3123 .base = {
3124 .cra_name = "seqiv(authenc(hmac(sha256),"
3125 "rfc3686(ctr(aes))))",
3126 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
3127 "rfc3686-ctr-aes-caam",
3128 .cra_blocksize = 1,
3129 },
3130 .setkey = aead_setkey,
3131 .setauthsize = aead_setauthsize,
3132 .encrypt = aead_encrypt,
3133 .decrypt = aead_decrypt,
3134 .ivsize = CTR_RFC3686_IV_SIZE,
3135 .maxauthsize = SHA256_DIGEST_SIZE,
3136 },
3137 .caam = {
3138 .class1_alg_type = OP_ALG_ALGSEL_AES |
3139 OP_ALG_AAI_CTR_MOD128,
3140 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
3141 OP_ALG_AAI_HMAC_PRECOMP,
3142 .rfc3686 = true,
3143 .geniv = true,
3144 },
3145 },
3146 {
3147 .aead = {
3148 .base = {
3149 .cra_name = "authenc(hmac(sha384),"
3150 "rfc3686(ctr(aes)))",
3151 .cra_driver_name = "authenc-hmac-sha384-"
3152 "rfc3686-ctr-aes-caam",
3153 .cra_blocksize = 1,
3154 },
3155 .setkey = aead_setkey,
3156 .setauthsize = aead_setauthsize,
3157 .encrypt = aead_encrypt,
3158 .decrypt = aead_decrypt,
3159 .ivsize = CTR_RFC3686_IV_SIZE,
3160 .maxauthsize = SHA384_DIGEST_SIZE,
3161 },
3162 .caam = {
3163 .class1_alg_type = OP_ALG_ALGSEL_AES |
3164 OP_ALG_AAI_CTR_MOD128,
3165 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3166 OP_ALG_AAI_HMAC_PRECOMP,
3167 .rfc3686 = true,
3168 },
3169 },
3170 {
3171 .aead = {
3172 .base = {
3173 .cra_name = "seqiv(authenc(hmac(sha384),"
3174 "rfc3686(ctr(aes))))",
3175 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
3176 "rfc3686-ctr-aes-caam",
3177 .cra_blocksize = 1,
3178 },
3179 .setkey = aead_setkey,
3180 .setauthsize = aead_setauthsize,
3181 .encrypt = aead_encrypt,
3182 .decrypt = aead_decrypt,
3183 .ivsize = CTR_RFC3686_IV_SIZE,
3184 .maxauthsize = SHA384_DIGEST_SIZE,
3185 },
3186 .caam = {
3187 .class1_alg_type = OP_ALG_ALGSEL_AES |
3188 OP_ALG_AAI_CTR_MOD128,
3189 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
3190 OP_ALG_AAI_HMAC_PRECOMP,
3191 .rfc3686 = true,
3192 .geniv = true,
3193 },
3194 },
3195 {
3196 .aead = {
3197 .base = {
3198 .cra_name = "authenc(hmac(sha512),"
3199 "rfc3686(ctr(aes)))",
3200 .cra_driver_name = "authenc-hmac-sha512-"
3201 "rfc3686-ctr-aes-caam",
3202 .cra_blocksize = 1,
3203 },
3204 .setkey = aead_setkey,
3205 .setauthsize = aead_setauthsize,
3206 .encrypt = aead_encrypt,
3207 .decrypt = aead_decrypt,
3208 .ivsize = CTR_RFC3686_IV_SIZE,
3209 .maxauthsize = SHA512_DIGEST_SIZE,
3210 },
3211 .caam = {
3212 .class1_alg_type = OP_ALG_ALGSEL_AES |
3213 OP_ALG_AAI_CTR_MOD128,
3214 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3215 OP_ALG_AAI_HMAC_PRECOMP,
3216 .rfc3686 = true,
3217 },
3218 },
3219 {
3220 .aead = {
3221 .base = {
3222 .cra_name = "seqiv(authenc(hmac(sha512),"
3223 "rfc3686(ctr(aes))))",
3224 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
3225 "rfc3686-ctr-aes-caam",
3226 .cra_blocksize = 1,
3227 },
3228 .setkey = aead_setkey,
3229 .setauthsize = aead_setauthsize,
3230 .encrypt = aead_encrypt,
3231 .decrypt = aead_decrypt,
3232 .ivsize = CTR_RFC3686_IV_SIZE,
3233 .maxauthsize = SHA512_DIGEST_SIZE,
3234 },
3235 .caam = {
3236 .class1_alg_type = OP_ALG_ALGSEL_AES |
3237 OP_ALG_AAI_CTR_MOD128,
3238 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3239 OP_ALG_AAI_HMAC_PRECOMP,
3240 .rfc3686 = true,
3241 .geniv = true,
3242 },
3243 },
3244};
3245
3246struct caam_crypto_alg {
3247 struct crypto_alg crypto_alg;
3248 struct list_head entry;
3249 struct caam_alg_entry caam;
3250};
3251
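/*
 * Common per-tfm setup: allocate a job ring, then map the shared
 * descriptors and key as a single contiguous DMA region so that the
 * individual handles can be derived by offset. On Era 6+ parts,
 * descriptors using DKP overwrite the key in place, hence the
 * bidirectional mapping.
 */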
3252static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
3253 bool uses_dkp)
3254{
3255 dma_addr_t dma_addr;
3256 struct caam_drv_private *priv;
3257
3258 ctx->jrdev = caam_jr_alloc();
3259 if (IS_ERR(ctx->jrdev)) {
3260 pr_err("Job Ring Device allocation for transform failed\n");
3261 return PTR_ERR(ctx->jrdev);
3262 }
3263
3264 priv = dev_get_drvdata(ctx->jrdev->parent);
3265 if (priv->era >= 6 && uses_dkp)
3266 ctx->dir = DMA_BIDIRECTIONAL;
3267 else
3268 ctx->dir = DMA_TO_DEVICE;
3269
3270 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
3271 offsetof(struct caam_ctx,
3272 sh_desc_enc_dma),
3273 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3274 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
3275 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
3276 caam_jr_free(ctx->jrdev);
3277 return -ENOMEM;
3278 }
3279
3280 ctx->sh_desc_enc_dma = dma_addr;
3281 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
3282 sh_desc_dec);
3283 ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
3284 sh_desc_givenc);
3285 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
3286
3287 /* copy descriptor header template value */
3288 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
3289 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
3290
3291 return 0;
3292}
3293
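/*
 * tfm init hooks: recover the enclosing caam alg from the generic alg
 * and run the common setup. For AEADs, setkey == aead_setkey singles
 * out the authenc descriptors that use DKP and thus need the
 * bidirectional context mapping.
 */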
3294static int caam_cra_init(struct crypto_tfm *tfm)
3295{
3296 struct crypto_alg *alg = tfm->__crt_alg;
3297 struct caam_crypto_alg *caam_alg =
3298 container_of(alg, struct caam_crypto_alg, crypto_alg);
3299 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
3300
3301 return caam_init_common(ctx, &caam_alg->caam, false);
3302}
3303
3304static int caam_aead_init(struct crypto_aead *tfm)
3305{
3306 struct aead_alg *alg = crypto_aead_alg(tfm);
3307 struct caam_aead_alg *caam_alg =
3308 container_of(alg, struct caam_aead_alg, aead);
3309 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
3310
3311 return caam_init_common(ctx, &caam_alg->caam,
3312 alg->setkey == aead_setkey);
3313}
3314
3315static void caam_exit_common(struct caam_ctx *ctx)
3316{
3317 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
3318 offsetof(struct caam_ctx, sh_desc_enc_dma),
3319 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
3320 caam_jr_free(ctx->jrdev);
3321}
3322
3323static void caam_cra_exit(struct crypto_tfm *tfm)
3324{
3325 caam_exit_common(crypto_tfm_ctx(tfm));
3326}
3327
3328static void caam_aead_exit(struct crypto_aead *tfm)
3329{
3330 caam_exit_common(crypto_aead_ctx(tfm));
3331}
3332
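/*
 * Module teardown: unregister whichever AEADs were registered, then
 * free the legacy algorithms tracked on alg_list.
 */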
3333static void __exit caam_algapi_exit(void)
3334{
3336 struct caam_crypto_alg *t_alg, *n;
3337 int i;
3338
3339 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3340 struct caam_aead_alg *t_alg = driver_aeads + i;
3341
3342 if (t_alg->registered)
3343 crypto_unregister_aead(&t_alg->aead);
3344 }
3345
3346 if (!alg_list.next)
3347 return;
3348
3349 list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
3350 crypto_unregister_alg(&t_alg->crypto_alg);
3351 list_del(&t_alg->entry);
3352 kfree(t_alg);
3353 }
3354}
3355
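/*
 * Build a crypto_alg from a driver_algs[] template. The givcipher
 * variant differs only in cra_type, which is what lets the crypto
 * layer route requests through ->givencrypt.
 */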
3356static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
3357 *template)
3358{
3359 struct caam_crypto_alg *t_alg;
3360 struct crypto_alg *alg;
3361
3362 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
3363 if (!t_alg) {
3364 pr_err("failed to allocate t_alg\n");
3365 return ERR_PTR(-ENOMEM);
3366 }
3367
3368 alg = &t_alg->crypto_alg;
3369
3370 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
3371 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
3372 template->driver_name);
3373 alg->cra_module = THIS_MODULE;
3374 alg->cra_init = caam_cra_init;
3375 alg->cra_exit = caam_cra_exit;
3376 alg->cra_priority = CAAM_CRA_PRIORITY;
3377 alg->cra_blocksize = template->blocksize;
3378 alg->cra_alignmask = 0;
3379 alg->cra_ctxsize = sizeof(struct caam_ctx);
3380 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
3381 template->type;
3382 switch (template->type) {
3383 case CRYPTO_ALG_TYPE_GIVCIPHER:
3384 alg->cra_type = &crypto_givcipher_type;
3385 alg->cra_ablkcipher = template->template_ablkcipher;
3386 break;
3387 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3388 alg->cra_type = &crypto_ablkcipher_type;
3389 alg->cra_ablkcipher = template->template_ablkcipher;
3390 break;
3391 }
3392
3393 t_alg->caam.class1_alg_type = template->class1_alg_type;
3394 t_alg->caam.class2_alg_type = template->class2_alg_type;
3395
3396 return t_alg;
3397}
3398
3399static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3400{
3401 struct aead_alg *alg = &t_alg->aead;
3402
3403 alg->base.cra_module = THIS_MODULE;
3404 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3405 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3406 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
3407
3408 alg->init = caam_aead_init;
3409 alg->exit = caam_aead_exit;
3410}
3411
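/*
 * Module init: locate the CAAM controller, read the CHA version and
 * instantiation registers, and register only those algorithms the
 * hardware can actually run (no DES/AES algs without the respective
 * block, no XTS/GCM on low-power AES, digest sizes capped by the MD
 * block).
 */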
3412static int __init caam_algapi_init(void)
3413{
3414 struct device_node *dev_node;
3415 struct platform_device *pdev;
3416 struct device *ctrldev;
3417 struct caam_drv_private *priv;
3418 int i = 0, err = 0;
3419 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
3420 unsigned int md_limit = SHA512_DIGEST_SIZE;
3421 bool registered = false;
3422
3423 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3424 if (!dev_node) {
3425 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
3426 if (!dev_node)
3427 return -ENODEV;
3428 }
3429
3430 pdev = of_find_device_by_node(dev_node);
3431 if (!pdev) {
3432 of_node_put(dev_node);
3433 return -ENODEV;
3434 }
3435
3436 ctrldev = &pdev->dev;
3437 priv = dev_get_drvdata(ctrldev);
3438 of_node_put(dev_node);
3439
3440 /*
3441 * If priv is NULL, it's probably because the caam driver wasn't
3442 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
3443 */
3444 if (!priv)
3445 return -ENODEV;
3446
3448 INIT_LIST_HEAD(&alg_list);
3449
3450 /*
3451 * Register crypto algorithms the device supports.
3452 * First, detect presence and attributes of DES, AES, and MD blocks.
3453 */
3454 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
3455 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
3456 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
3457 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
3458 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
3459
3460	/* If the MD block is a low-power LP256 instance, cap digests at SHA-256 */
3461 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
3462 md_limit = SHA256_DIGEST_SIZE;
3463
3464 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3465 struct caam_crypto_alg *t_alg;
3466 struct caam_alg_template *alg = driver_algs + i;
3467 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
3468
3469 /* Skip DES algorithms if not supported by device */
3470 if (!des_inst &&
3471 ((alg_sel == OP_ALG_ALGSEL_3DES) ||
3472 (alg_sel == OP_ALG_ALGSEL_DES)))
3473 continue;
3474
3475 /* Skip AES algorithms if not supported by device */
3476 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
3477 continue;
3478
3479		/*
3480		 * Skip AES modes (XTS) that low-power (LP) AES
3481		 * blocks do not support.
3482		 */
3483 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3484 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
3485 OP_ALG_AAI_XTS)
3486 continue;
3487
3488 t_alg = caam_alg_alloc(alg);
3489 if (IS_ERR(t_alg)) {
3490 err = PTR_ERR(t_alg);
3491 pr_warn("%s alg allocation failed\n", alg->driver_name);
3492 continue;
3493 }
3494
3495 err = crypto_register_alg(&t_alg->crypto_alg);
3496 if (err) {
3497 pr_warn("%s alg registration failed\n",
3498 t_alg->crypto_alg.cra_driver_name);
3499 kfree(t_alg);
3500 continue;
3501 }
3502
3503 list_add_tail(&t_alg->entry, &alg_list);
3504 registered = true;
3505 }
3506
3507 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
3508 struct caam_aead_alg *t_alg = driver_aeads + i;
3509 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
3510 OP_ALG_ALGSEL_MASK;
3511 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
3512 OP_ALG_ALGSEL_MASK;
3513 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
3514
3515 /* Skip DES algorithms if not supported by device */
3516 if (!des_inst &&
3517 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
3518 (c1_alg_sel == OP_ALG_ALGSEL_DES)))
3519 continue;
3520
3521 /* Skip AES algorithms if not supported by device */
3522 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
3523 continue;
3524
3525		/*
3526		 * Skip AES algorithms (GCM) that low-power (LP)
3527		 * AES blocks do not support.
3528		 */
3529 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
3530 if (alg_aai == OP_ALG_AAI_GCM)
3531 continue;
3532
3533 /*
3534 * Skip algorithms requiring message digests
3535 * if MD or MD size is not supported by device.
3536 */
3537 if (c2_alg_sel &&
3538 (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
3539 continue;
3540
3541 caam_aead_alg_init(t_alg);
3542
3543 err = crypto_register_aead(&t_alg->aead);
3544 if (err) {
3545 pr_warn("%s alg registration failed\n",
3546 t_alg->aead.base.cra_driver_name);
3547 continue;
3548 }
3549
3550 t_alg->registered = true;
3551 registered = true;
3552 }
3553
3554 if (registered)
3555 pr_info("caam algorithms registered in /proc/crypto\n");
3556
3557 return err;
3558}
3559
3560module_init(caam_algapi_init);
3561module_exit(caam_algapi_exit);
3562
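/*
 * Usage sketch (not part of this driver): once registered, the
 * algorithms are reachable by name through the generic crypto API,
 * and CAAM_CRA_PRIORITY makes them preferred over software
 * implementations, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_free_aead(tfm);
 */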
3563MODULE_LICENSE("GPL");
3564MODULE_DESCRIPTION("FSL CAAM support for crypto API");
3565MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");