// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 *
5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 *
7 * Scatterlist Crypto API glue code copied from files with the following:
8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 *
10 * Crypto algorithm registration code copied from hifn driver:
11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12 * All rights reserved.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mod_devicetable.h>
18#include <linux/device.h>
19#include <linux/interrupt.h>
20#include <linux/crypto.h>
21#include <linux/hw_random.h>
22#include <linux/of_address.h>
23#include <linux/of_irq.h>
24#include <linux/of_platform.h>
25#include <linux/dma-mapping.h>
26#include <linux/io.h>
27#include <linux/spinlock.h>
28#include <linux/rtnetlink.h>
29#include <linux/slab.h>
30
31#include <crypto/algapi.h>
32#include <crypto/aes.h>
33#include <crypto/internal/des.h>
34#include <crypto/sha.h>
35#include <crypto/md5.h>
36#include <crypto/internal/aead.h>
37#include <crypto/authenc.h>
38#include <crypto/skcipher.h>
39#include <crypto/hash.h>
40#include <crypto/internal/hash.h>
41#include <crypto/scatterwalk.h>
42
43#include "talitos.h"
44
45static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 unsigned int len, bool is_sec1)
47{
48 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
49 if (is_sec1) {
50 ptr->len1 = cpu_to_be16(len);
51 } else {
52 ptr->len = cpu_to_be16(len);
53 ptr->eptr = upper_32_bits(dma_addr);
54 }
55}
56
57static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 struct talitos_ptr *src_ptr, bool is_sec1)
59{
60 dst_ptr->ptr = src_ptr->ptr;
61 if (is_sec1) {
62 dst_ptr->len1 = src_ptr->len1;
63 } else {
64 dst_ptr->len = src_ptr->len;
65 dst_ptr->eptr = src_ptr->eptr;
66 }
67}
68
69static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
70 bool is_sec1)
71{
72 if (is_sec1)
73 return be16_to_cpu(ptr->len1);
74 else
75 return be16_to_cpu(ptr->len);
76}
77
78static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
79 bool is_sec1)
80{
81 if (!is_sec1)
82 ptr->j_extent = val;
83}
84
85static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
86{
87 if (!is_sec1)
88 ptr->j_extent |= val;
89}
90
91/*
92 * map virtual single (contiguous) pointer to h/w descriptor pointer
93 */
94static void __map_single_talitos_ptr(struct device *dev,
95 struct talitos_ptr *ptr,
96 unsigned int len, void *data,
97 enum dma_data_direction dir,
98 unsigned long attrs)
99{
100 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
103
104 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
105}
106
107static void map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir)
111{
112 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
113}
114
115static void map_single_talitos_ptr_nosync(struct device *dev,
116 struct talitos_ptr *ptr,
117 unsigned int len, void *data,
118 enum dma_data_direction dir)
119{
120 __map_single_talitos_ptr(dev, ptr, len, data, dir,
121 DMA_ATTR_SKIP_CPU_SYNC);
122}
123
124/*
125 * unmap bus single (contiguous) h/w descriptor pointer
126 */
127static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
130{
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
133
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
136}
137
/*
 * Reset one SEC channel and reprogram its configuration.
 *
 * Sets the channel reset bit (the bit lives in CCCR_LO on SEC1, CCCR on
 * SEC2+), busy-waits for the h/w to self-clear it, then re-enables
 * 36-bit addressing, done writeback and the done interrupt.
 *
 * Returns 0 on success, -EIO if the reset bit never clears within
 * TALITOS_TIMEOUT polls.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		/* poll until the h/w clears the reset bit (or we give up) */
		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
180
/*
 * Software-reset the whole SEC block.
 *
 * Writes the SWR bit (position differs between SEC1 and SEC2+) and
 * polls until the h/w clears it.  On dual-IRQ parts the channel-to-IRQ
 * routing (RCA1/RCA3) is re-established afterwards, since it does not
 * survive the reset.  Returns 0 on success, -EIO on timeout.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		/* route channels 1 and 3 to the second interrupt line */
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
206
207/*
208 * Reset and initialize the device
209 */
210static int init_device(struct device *dev)
211{
212 struct talitos_private *priv = dev_get_drvdata(dev);
213 int ch, err;
214 bool is_sec1 = has_ftr_sec1(priv);
215
216 /*
217 * Master reset
218 * errata documentation: warning: certain SEC interrupts
219 * are not fully cleared by writing the MCR:SWR bit,
220 * set bit twice to completely reset
221 */
222 err = reset_device(dev);
223 if (err)
224 return err;
225
226 err = reset_device(dev);
227 if (err)
228 return err;
229
230 /* reset channels */
231 for (ch = 0; ch < priv->num_channels; ch++) {
232 err = reset_channel(dev, ch);
233 if (err)
234 return err;
235 }
236
237 /* enable channel done and error interrupts */
238 if (is_sec1) {
239 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 /* disable parity error check in DEU (erroneous? test vect.) */
242 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
243 } else {
244 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
246 }
247
248 /* disable integrity check error interrupts (use writeback instead) */
249 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 TALITOS_MDEUICR_LO_ICE);
252
253 return 0;
254}
255
256/**
257 * talitos_submit - submits a descriptor to the device for processing
258 * @dev: the SEC device to be used
259 * @ch: the SEC device channel to be used
260 * @desc: the descriptor to be processed by the device
261 * @callback: whom to call when processing is complete
262 * @context: a handle for use by caller (optional)
263 *
264 * desc must contain valid dma-mapped (bus physical) address pointers.
265 * callback must check err and feedback in descriptor header
266 * for device processing status.
267 */
268static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 void (*callback)(struct device *dev,
270 struct talitos_desc *desc,
271 void *context, int error),
272 void *context)
273{
274 struct talitos_private *priv = dev_get_drvdata(dev);
275 struct talitos_request *request;
276 unsigned long flags;
277 int head;
278 bool is_sec1 = has_ftr_sec1(priv);
279
280 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
281
282 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 /* h/w fifo is full */
284 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
285 return -EAGAIN;
286 }
287
288 head = priv->chan[ch].head;
289 request = &priv->chan[ch].fifo[head];
290
291 /* map descriptor and save caller data */
292 if (is_sec1) {
293 desc->hdr1 = desc->hdr;
294 request->dma_desc = dma_map_single(dev, &desc->hdr1,
295 TALITOS_DESC_SIZE,
296 DMA_BIDIRECTIONAL);
297 } else {
298 request->dma_desc = dma_map_single(dev, desc,
299 TALITOS_DESC_SIZE,
300 DMA_BIDIRECTIONAL);
301 }
302 request->callback = callback;
303 request->context = context;
304
305 /* increment fifo head */
306 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
307
308 smp_wmb();
309 request->desc = desc;
310
311 /* GO! */
312 wmb();
313 out_be32(priv->chan[ch].reg + TALITOS_FF,
314 upper_32_bits(request->dma_desc));
315 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 lower_32_bits(request->dma_desc));
317
318 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
319
320 return -EINPROGRESS;
321}
322
323static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
324{
325 struct talitos_edesc *edesc;
326
327 if (!is_sec1)
328 return request->desc->hdr;
329
330 if (!request->desc->next_desc)
331 return request->desc->hdr1;
332
333 edesc = container_of(request->desc, struct talitos_edesc, desc);
334
335 return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
336}
337
338/*
339 * process what was done, notify callback of error if not
340 */
341static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
342{
343 struct talitos_private *priv = dev_get_drvdata(dev);
344 struct talitos_request *request, saved_req;
345 unsigned long flags;
346 int tail, status;
347 bool is_sec1 = has_ftr_sec1(priv);
348
349 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
350
351 tail = priv->chan[ch].tail;
352 while (priv->chan[ch].fifo[tail].desc) {
353 __be32 hdr;
354
355 request = &priv->chan[ch].fifo[tail];
356
357 /* descriptors with their done bits set don't get the error */
358 rmb();
359 hdr = get_request_hdr(request, is_sec1);
360
361 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
362 status = 0;
363 else
364 if (!error)
365 break;
366 else
367 status = error;
368
369 dma_unmap_single(dev, request->dma_desc,
370 TALITOS_DESC_SIZE,
371 DMA_BIDIRECTIONAL);
372
373 /* copy entries so we can call callback outside lock */
374 saved_req.desc = request->desc;
375 saved_req.callback = request->callback;
376 saved_req.context = request->context;
377
378 /* release request entry in fifo */
379 smp_wmb();
380 request->desc = NULL;
381
382 /* increment fifo tail */
383 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
384
385 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
386
387 atomic_dec(&priv->chan[ch].submit_count);
388
389 saved_req.callback(dev, saved_req.desc, saved_req.context,
390 status);
391 /* channel may resume processing in single desc error case */
392 if (error && !reset_ch && status == error)
393 return;
394 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
395 tail = priv->chan[ch].tail;
396 }
397
398 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
399}
400
401/*
402 * process completed requests for channels that have done status
403 */
404#define DEF_TALITOS1_DONE(name, ch_done_mask) \
405static void talitos1_done_##name(unsigned long data) \
406{ \
407 struct device *dev = (struct device *)data; \
408 struct talitos_private *priv = dev_get_drvdata(dev); \
409 unsigned long flags; \
410 \
411 if (ch_done_mask & 0x10000000) \
412 flush_channel(dev, 0, 0, 0); \
413 if (ch_done_mask & 0x40000000) \
414 flush_channel(dev, 1, 0, 0); \
415 if (ch_done_mask & 0x00010000) \
416 flush_channel(dev, 2, 0, 0); \
417 if (ch_done_mask & 0x00040000) \
418 flush_channel(dev, 3, 0, 0); \
419 \
420 /* At this point, all completed channels have been processed */ \
421 /* Unmask done interrupts for channels completed later on. */ \
422 spin_lock_irqsave(&priv->reg_lock, flags); \
423 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
424 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
425 spin_unlock_irqrestore(&priv->reg_lock, flags); \
426}
427
428DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
429DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
430
/*
 * SEC2+ counterpart of DEF_TALITOS1_DONE: same flush-then-unmask
 * pattern, but done bits sit at even positions (ch * 2) and IMR bits
 * are *set* to unmask.
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
459
460/*
461 * locate current (offending) descriptor
462 */
463static __be32 current_desc_hdr(struct device *dev, int ch)
464{
465 struct talitos_private *priv = dev_get_drvdata(dev);
466 int tail, iter;
467 dma_addr_t cur_desc;
468
469 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
470 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
471
472 if (!cur_desc) {
473 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
474 return 0;
475 }
476
477 tail = priv->chan[ch].tail;
478
479 iter = tail;
480 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
481 priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
482 iter = (iter + 1) & (priv->fifo_len - 1);
483 if (iter == tail) {
484 dev_err(dev, "couldn't locate current descriptor\n");
485 return 0;
486 }
487 }
488
489 if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
490 struct talitos_edesc *edesc;
491
492 edesc = container_of(priv->chan[ch].fifo[iter].desc,
493 struct talitos_edesc, desc);
494 return ((struct talitos_desc *)
495 (edesc->buf + edesc->dma_len))->hdr;
496 }
497
498 return priv->chan[ch].fifo[iter].desc->hdr;
499}
500
501/*
502 * user diagnostics; report root cause of error based on execution unit status
503 */
504static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
505{
506 struct talitos_private *priv = dev_get_drvdata(dev);
507 int i;
508
509 if (!desc_hdr)
510 desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
511
512 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
513 case DESC_HDR_SEL0_AFEU:
514 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
515 in_be32(priv->reg_afeu + TALITOS_EUISR),
516 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
517 break;
518 case DESC_HDR_SEL0_DEU:
519 dev_err(dev, "DEUISR 0x%08x_%08x\n",
520 in_be32(priv->reg_deu + TALITOS_EUISR),
521 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
522 break;
523 case DESC_HDR_SEL0_MDEUA:
524 case DESC_HDR_SEL0_MDEUB:
525 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
526 in_be32(priv->reg_mdeu + TALITOS_EUISR),
527 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
528 break;
529 case DESC_HDR_SEL0_RNG:
530 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
531 in_be32(priv->reg_rngu + TALITOS_ISR),
532 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
533 break;
534 case DESC_HDR_SEL0_PKEU:
535 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
536 in_be32(priv->reg_pkeu + TALITOS_EUISR),
537 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
538 break;
539 case DESC_HDR_SEL0_AESU:
540 dev_err(dev, "AESUISR 0x%08x_%08x\n",
541 in_be32(priv->reg_aesu + TALITOS_EUISR),
542 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
543 break;
544 case DESC_HDR_SEL0_CRCU:
545 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
546 in_be32(priv->reg_crcu + TALITOS_EUISR),
547 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
548 break;
549 case DESC_HDR_SEL0_KEU:
550 dev_err(dev, "KEUISR 0x%08x_%08x\n",
551 in_be32(priv->reg_pkeu + TALITOS_EUISR),
552 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
553 break;
554 }
555
556 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
557 case DESC_HDR_SEL1_MDEUA:
558 case DESC_HDR_SEL1_MDEUB:
559 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
560 in_be32(priv->reg_mdeu + TALITOS_EUISR),
561 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
562 break;
563 case DESC_HDR_SEL1_CRCU:
564 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
565 in_be32(priv->reg_crcu + TALITOS_EUISR),
566 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
567 break;
568 }
569
570 for (i = 0; i < 8; i++)
571 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
572 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
573 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
574}
575
576/*
577 * recover from error interrupts
578 */
579static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
580{
581 struct talitos_private *priv = dev_get_drvdata(dev);
582 unsigned int timeout = TALITOS_TIMEOUT;
583 int ch, error, reset_dev = 0;
584 u32 v_lo;
585 bool is_sec1 = has_ftr_sec1(priv);
586 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
587
588 for (ch = 0; ch < priv->num_channels; ch++) {
589 /* skip channels without errors */
590 if (is_sec1) {
591 /* bits 29, 31, 17, 19 */
592 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
593 continue;
594 } else {
595 if (!(isr & (1 << (ch * 2 + 1))))
596 continue;
597 }
598
599 error = -EINVAL;
600
601 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
602
603 if (v_lo & TALITOS_CCPSR_LO_DOF) {
604 dev_err(dev, "double fetch fifo overflow error\n");
605 error = -EAGAIN;
606 reset_ch = 1;
607 }
608 if (v_lo & TALITOS_CCPSR_LO_SOF) {
609 /* h/w dropped descriptor */
610 dev_err(dev, "single fetch fifo overflow error\n");
611 error = -EAGAIN;
612 }
613 if (v_lo & TALITOS_CCPSR_LO_MDTE)
614 dev_err(dev, "master data transfer error\n");
615 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
616 dev_err(dev, is_sec1 ? "pointer not complete error\n"
617 : "s/g data length zero error\n");
618 if (v_lo & TALITOS_CCPSR_LO_FPZ)
619 dev_err(dev, is_sec1 ? "parity error\n"
620 : "fetch pointer zero error\n");
621 if (v_lo & TALITOS_CCPSR_LO_IDH)
622 dev_err(dev, "illegal descriptor header error\n");
623 if (v_lo & TALITOS_CCPSR_LO_IEU)
624 dev_err(dev, is_sec1 ? "static assignment error\n"
625 : "invalid exec unit error\n");
626 if (v_lo & TALITOS_CCPSR_LO_EU)
627 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
628 if (!is_sec1) {
629 if (v_lo & TALITOS_CCPSR_LO_GB)
630 dev_err(dev, "gather boundary error\n");
631 if (v_lo & TALITOS_CCPSR_LO_GRL)
632 dev_err(dev, "gather return/length error\n");
633 if (v_lo & TALITOS_CCPSR_LO_SB)
634 dev_err(dev, "scatter boundary error\n");
635 if (v_lo & TALITOS_CCPSR_LO_SRL)
636 dev_err(dev, "scatter return/length error\n");
637 }
638
639 flush_channel(dev, ch, error, reset_ch);
640
641 if (reset_ch) {
642 reset_channel(dev, ch);
643 } else {
644 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
645 TALITOS2_CCCR_CONT);
646 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
647 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
648 TALITOS2_CCCR_CONT) && --timeout)
649 cpu_relax();
650 if (timeout == 0) {
651 dev_err(dev, "failed to restart channel %d\n",
652 ch);
653 reset_dev = 1;
654 }
655 }
656 }
657 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
658 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
659 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
660 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
661 isr, isr_lo);
662 else
663 dev_err(dev, "done overflow, internal time out, or "
664 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
665
666 /* purge request queues */
667 for (ch = 0; ch < priv->num_channels; ch++)
668 flush_channel(dev, ch, -EIO, 1);
669
670 /* reset and reinitialize the device */
671 init_device(dev);
672 }
673}
674
/*
 * SEC1 hard-IRQ handler: latch and acknowledge the status registers,
 * dispatch errors to talitos_error() (with reg_lock dropped), and for
 * done bits mask further done interrupts (setbits32 masks on SEC1) and
 * defer completion to the per-group tasklet.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
709
/*
 * SEC2+ hard-IRQ handler: same structure as the SEC1 variant, but done
 * interrupts are masked by *clearing* IMR bits, and any isr_lo bit is
 * treated as an error condition.
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
748
749/*
750 * hwrng
751 */
752static int talitos_rng_data_present(struct hwrng *rng, int wait)
753{
754 struct device *dev = (struct device *)rng->priv;
755 struct talitos_private *priv = dev_get_drvdata(dev);
756 u32 ofl;
757 int i;
758
759 for (i = 0; i < 20; i++) {
760 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
761 TALITOS_RNGUSR_LO_OFL;
762 if (ofl || !wait)
763 break;
764 udelay(10);
765 }
766
767 return !!ofl;
768}
769
/*
 * hwrng ->data_read: fetch one 32-bit word of entropy.
 *
 * NOTE(review): both halves of the 64-bit fifo entry are read (the fifo
 * requires 64-bit accesses), but only the second (LO) word survives in
 * *data — the first read's value is intentionally overwritten.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
781
/*
 * hwrng ->init: software-reset the RNGU and kick off generation.
 * Returns -ENODEV when the reset-done (RD) bit never appears within
 * TALITOS_TIMEOUT polls.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	/*
	 * NOTE(review): setbits32 with a zero mask rewrites EUDSR_LO with
	 * its current value; presumably the write access itself triggers
	 * generation — confirm against the SEC reference manual.
	 */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
803
804static int talitos_register_rng(struct device *dev)
805{
806 struct talitos_private *priv = dev_get_drvdata(dev);
807 int err;
808
809 priv->rng.name = dev_driver_string(dev),
810 priv->rng.init = talitos_rng_init,
811 priv->rng.data_present = talitos_rng_data_present,
812 priv->rng.data_read = talitos_rng_data_read,
813 priv->rng.priv = (unsigned long)dev;
814
815 err = hwrng_register(&priv->rng);
816 if (!err)
817 priv->rng_registered = true;
818
819 return err;
820}
821
822static void talitos_unregister_rng(struct device *dev)
823{
824 struct talitos_private *priv = dev_get_drvdata(dev);
825
826 if (!priv->rng_registered)
827 return;
828
829 hwrng_unregister(&priv->rng);
830 priv->rng_registered = false;
831}
832
833/*
834 * crypto alg
835 */
836#define TALITOS_CRA_PRIORITY 3000
837/*
838 * Defines a priority for doing AEAD with descriptors type
839 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
840 */
841#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
842#ifdef CONFIG_CRYPTO_DEV_TALITOS2
843#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
844#else
845#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
846#endif
847#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
848
/* per-transform (tfm) context shared by the talitos algorithms */
struct talitos_ctx {
	struct device *dev;		/* SEC device servicing this tfm */
	int ch;				/* channel used for this tfm */
	__be32 desc_hdr_template;	/* preformatted descriptor header */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key then enc key (see aead_setkey) */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;		/* DMA mapping of key[0..keylen) */
	unsigned int keylen;		/* total key bytes mapped */
	unsigned int enckeylen;		/* encryption-key portion length */
	unsigned int authkeylen;	/* authentication-key portion length */
};
860
861#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
862#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
863
/*
 * per-request state for ahash operations
 * NOTE(review): flag semantics (swinit/first/last) are defined by the
 * ahash implementation later in this file, outside this chunk.
 */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;	/* bytes of hw_context in use */
	u8 buf[2][HASH_MAX_BLOCK_SIZE];	/* double buffer for partial blocks */
	int buf_idx;			/* index of the active half of buf[] */
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;	/* bytes deferred to the next update */
	unsigned int nbuf;		/* bytes currently buffered */
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
877
/*
 * serialized hash state for ahash export/import; fields mirror the
 * corresponding members of struct talitos_ahash_req_ctx
 */
struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* buffered partial block */
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
887
/*
 * Set the combined auth+enc key for an AEAD (authenc) transform.
 *
 * The authenc-encoded blob is split with crypto_authenc_extractkeys();
 * the auth key is stored at the start of ctx->key, immediately followed
 * by the encryption key, and the combined area is (re)mapped for DMA.
 * Any previous key mapping is released first.
 *
 * Returns 0 on success, -EINVAL for a malformed or oversized key.
 */
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	/* drop the DMA mapping of the old key before overwriting it */
	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	/* scrub the stack copy of the key material */
	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
921
/*
 * Set the auth+enc key for an AEAD transform whose cipher is 3DES.
 *
 * Same flow as aead_setkey(), plus the mandatory 3DES weak/parity key
 * check (verify_aead_des3_key()) on the encryption-key portion before
 * anything is stored.  The stack copy of the key material is always
 * scrubbed on exit.
 */
static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	/* drop the DMA mapping of the old key before overwriting it */
	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
962
/*
 * Unmap the source/destination scatterlists of a completed request.
 *
 * On SEC1, a multi-entry destination was bounced through the edesc
 * link-table buffer, so that region is synced and copied back into
 * @dst first.  The unmap direction mirrors the mapping choice:
 * BIDIRECTIONAL when src and dst are the same scatterlist.
 */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		/* retrieve the bounced output data from the link table */
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}
990
/*
 * Undo all DMA mappings established for an AEAD request.
 *
 * The cipher IV pointer slot differs between descriptor types (index 2
 * for IPSEC_ESP, 3 for HSNA), and only IPSEC_ESP maps an extra output
 * ICV pointer (slot 6).  For non-IPSEC_ESP descriptors the last ivsize
 * bytes of ciphertext are saved into ctx->iv — presumably to chain the
 * next IV; confirm against the descriptor setup code.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	/* on decrypt, the trailing authsize bytes are the ICV, not data */
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}
1022
1023/*
1024 * ipsec_esp descriptor callbacks
1025 */
1026static void ipsec_esp_encrypt_done(struct device *dev,
1027 struct talitos_desc *desc, void *context,
1028 int err)
1029{
1030 struct aead_request *areq = context;
1031 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1032 unsigned int ivsize = crypto_aead_ivsize(authenc);
1033 struct talitos_edesc *edesc;
1034
1035 edesc = container_of(desc, struct talitos_edesc, desc);
1036
1037 ipsec_esp_unmap(dev, edesc, areq, true);
1038
1039 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1040
1041 kfree(edesc);
1042
1043 aead_request_complete(areq, err);
1044}
1045
/*
 * Completion callback for AEAD decryption when the ICV is verified in
 * software: the two ICVs sit back-to-back at the end of edesc->buf
 * (received copy directly preceding the h/w-produced one at
 * buf + dma_len); they are compared in constant time and a mismatch is
 * reported as -EBADMSG.
 */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		/* crypto_memneq() avoids a timing side channel */
		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
1072
1073static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1074 struct talitos_desc *desc,
1075 void *context, int err)
1076{
1077 struct aead_request *req = context;
1078 struct talitos_edesc *edesc;
1079
1080 edesc = container_of(desc, struct talitos_edesc, desc);
1081
1082 ipsec_esp_unmap(dev, edesc, req, false);
1083
1084 /* check ICV auth status */
1085 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1086 DESC_HDR_LO_ICCR1_PASS))
1087 err = -EBADMSG;
1088
1089 kfree(edesc);
1090
1091 aead_request_complete(req, err);
1092}
1093
1094/*
1095 * convert scatterlist to SEC h/w link table format
1096 * stop at cryptlen bytes
1097 */
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 *
 * @offset:  bytes to skip at the start of the scatterlist
 * @datalen: payload bytes to describe
 * @elen:    extent bytes (e.g. trailing ICV) appended after the payload
 * @align:   the final entry is padded so the total is a multiple of this
 *
 * Returns the number of link table entries written.
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr, int align)
{
	/* one extra entry may be needed when elen spills into a new segment */
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	/* total bytes still to describe: payload plus extent */
	int cryptlen = datalen + elen;
	int padding = ALIGN(cryptlen, align) - cryptlen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		/* skip whole segments lying before 'offset' */
		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		/*
		 * split at the datalen boundary so the extent bytes always
		 * start in a link table entry of their own
		 */
		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		/* only the very last entry carries the alignment padding */
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}
1147
/*
 * Point descriptor pointer @ptr at @src: directly for a single contiguous
 * segment, at the SEC1 bounce buffer, or at a freshly built link table
 * starting at edesc->link_tbl[tbl_off].
 *
 * @force: always go through a link table even for a single segment.
 * Returns the number of segments used; a value > 1 means link table
 * entries were written and the caller must sync edesc->dma_link_tbl.
 */
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force, int align)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int aligned_len = ALIGN(len, align);

	if (!src) {
		/* null entry: zero address and length */
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1 && !force) {
		/* single contiguous segment: point straight at the data */
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		/* SEC1 has no link tables; data was bounced into edesc->buf */
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off], align);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed*/
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}
1184
/*
 * Common case wrapper around talitos_sg_map_ext(): no extent bytes, no
 * forced link table, no alignment constraint.
 */
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false, 1);
}
1193
1194/*
1195 * fill in and submit ipsec_esp descriptor
1196 */
/*
 * fill in and submit ipsec_esp descriptor
 *
 * Builds all seven descriptor pointers for an AEAD operation and hands the
 * descriptor to talitos_submit().  On submit failure the mappings are torn
 * down and the edesc freed here; otherwise @callback does the cleanup.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	/* on decrypt, areq->cryptlen still includes the trailing ICV */
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	/* IV and cipher-key slots are swapped between the two desc types */
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
	/* h/w-generated ICV lands at the tail of the link table buffer */
	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		/* SEC1 cannot walk scatterlists: bounce src into edesc->buf */
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen,
				 false, 1);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	/* on ipsec-esp encrypt the h/w appends the ICV to the ciphertext */
	if (is_ipsec_esp && encrypt)
		elen = authsize;
	else
		elen = 0;
	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
				 sg_count, areq->assoclen, tbl_off, elen,
				 is_ipsec_esp && !encrypt, 1);
	tbl_off += ret;

	if (!encrypt && is_ipsec_esp) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		/* Add an entry to the link table for ICV data */
		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
		sync_needed = true;
	} else if (!encrypt) {
		/* non-ipsec decrypt: stash the generated ICV for sw compare */
		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
		sync_needed = true;
	} else if (!is_ipsec_esp) {
		/* non-ipsec encrypt: write the ICV straight after the dst data */
		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
			       sg_count, areq->assoclen + cryptlen, tbl_off);
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	/* flush the CPU-written link table before the h/w reads it */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}
1319
1320/*
1321 * allocate and map the extended descriptor
1322 */
/*
 * allocate and map the extended descriptor
 *
 * Lays out one buffer holding: the edesc header, the link tables (or the
 * SEC1 bounce buffer), space for ICVs, optionally a second descriptor for
 * SEC1 ahash, and finally a copy of the IV.  Returns ERR_PTR on failure.
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		/* in-place (or hash): only src needs to be walked */
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		/* nents == 0 encodes "single segment, no link table needed" */
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src*/
		/* the ICV lives on the input for decrypt, output for encrypt */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents || !encrypt) {
		if (is_sec1)
			/* SEC1: bounce buffers instead of link tables */
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0) + authsize;
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
	}
	alloc_len += icv_stashing ? authsize : 0;

	/* if its a ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		/* keep a private IV copy at the very end of the allocation */
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
1420
1421static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1422 int icv_stashing, bool encrypt)
1423{
1424 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1425 unsigned int authsize = crypto_aead_authsize(authenc);
1426 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1427 unsigned int ivsize = crypto_aead_ivsize(authenc);
1428 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1429
1430 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1431 iv, areq->assoclen, cryptlen,
1432 authsize, ivsize, icv_stashing,
1433 areq->base.flags, encrypt);
1434}
1435
1436static int aead_encrypt(struct aead_request *req)
1437{
1438 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1439 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1440 struct talitos_edesc *edesc;
1441
1442 /* allocate extended descriptor */
1443 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1444 if (IS_ERR(edesc))
1445 return PTR_ERR(edesc);
1446
1447 /* set encrypt */
1448 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1449
1450 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1451}
1452
/*
 * AEAD decrypt entry point.  Uses the h/w ICV check where the hardware
 * supports it for this descriptor shape; otherwise stashes the received
 * ICV and verifies it in software in ipsec_esp_decrypt_swauth_done().
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * h/w auth check is only usable for ipsec-esp descriptors, and only
	 * when either everything is contiguous or the SEC variant includes
	 * the extent in the src link table length
	 */
	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	icvdata = edesc->buf + edesc->dma_len;

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
1494
1495static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1496 const u8 *key, unsigned int keylen)
1497{
1498 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1499 struct device *dev = ctx->dev;
1500
1501 if (ctx->keylen)
1502 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1503
1504 memcpy(&ctx->key, key, keylen);
1505 ctx->keylen = keylen;
1506
1507 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1508
1509 return 0;
1510}
1511
1512static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1513 const u8 *key, unsigned int keylen)
1514{
1515 return verify_ablkcipher_des_key(cipher, key) ?:
1516 ablkcipher_setkey(cipher, key, keylen);
1517}
1518
1519static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1520 const u8 *key, unsigned int keylen)
1521{
1522 return verify_ablkcipher_des3_key(cipher, key) ?:
1523 ablkcipher_setkey(cipher, key, keylen);
1524}
1525
1526static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1527 const u8 *key, unsigned int keylen)
1528{
1529 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1530 keylen == AES_KEYSIZE_256)
1531 return ablkcipher_setkey(cipher, key, keylen);
1532
1533 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1534
1535 return -EINVAL;
1536}
1537
/* Undo the DMA mappings set up by common_nonsnoop() for an ablkcipher req. */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	/* ptr[5] held the IV-out buffer (ctx->iv) */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	/* ptr[1] held the cipher IV in */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1551
1552static void ablkcipher_done(struct device *dev,
1553 struct talitos_desc *desc, void *context,
1554 int err)
1555{
1556 struct ablkcipher_request *areq = context;
1557 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1558 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1559 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1560 struct talitos_edesc *edesc;
1561
1562 edesc = container_of(desc, struct talitos_edesc, desc);
1563
1564 common_nonsnoop_unmap(dev, edesc, areq);
1565 memcpy(areq->info, ctx->iv, ivsize);
1566
1567 kfree(edesc);
1568
1569 areq->base.complete(&areq->base, err);
1570}
1571
/*
 * Fill in and submit the descriptor for one ablkcipher request.
 * On submit failure the mappings are torn down and the edesc freed here;
 * otherwise @callback (ablkcipher_done) performs the cleanup.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	/* AES-CTR needs its link table entries padded to 16-byte multiples */
	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		/* SEC1 cannot walk scatterlists: bounce src into edesc->buf */
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	/* dst link table starts after the src entries (+1 for a spare slot) */
	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	/* flush the CPU-written link table before the h/w reads it */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1644
1645static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1646 areq, bool encrypt)
1647{
1648 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1649 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1650 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1651
1652 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1653 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1654 areq->base.flags, encrypt);
1655}
1656
1657static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1658{
1659 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1660 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1661 struct talitos_edesc *edesc;
1662 unsigned int blocksize =
1663 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1664
1665 if (!areq->nbytes)
1666 return 0;
1667
1668 if (areq->nbytes % blocksize)
1669 return -EINVAL;
1670
1671 /* allocate extended descriptor */
1672 edesc = ablkcipher_edesc_alloc(areq, true);
1673 if (IS_ERR(edesc))
1674 return PTR_ERR(edesc);
1675
1676 /* set encrypt */
1677 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1678
1679 return common_nonsnoop(edesc, areq, ablkcipher_done);
1680}
1681
1682static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1683{
1684 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1685 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1686 struct talitos_edesc *edesc;
1687 unsigned int blocksize =
1688 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1689
1690 if (!areq->nbytes)
1691 return 0;
1692
1693 if (areq->nbytes % blocksize)
1694 return -EINVAL;
1695
1696 /* allocate extended descriptor */
1697 edesc = ablkcipher_edesc_alloc(areq, false);
1698 if (IS_ERR(edesc))
1699 return PTR_ERR(edesc);
1700
1701 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1702
1703 return common_nonsnoop(edesc, areq, ablkcipher_done);
1704}
1705
/*
 * Undo all DMA mappings set up by common_nonsnoop_hash(), including those
 * of the optional second descriptor chained on SEC1 (desc2 lives in the
 * same allocation, right after the link table / bounce buffer).
 */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	/* ptr[5]: hash/context out; desc2 may share the same buffer */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		/* first desc had no context in; desc2 mapped its own */
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	/* SEC1: buffered bytes were mapped separately into ptr[3] */
	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	/* unmap the chained second descriptor itself */
	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
1745
1746static void ahash_done(struct device *dev,
1747 struct talitos_desc *desc, void *context,
1748 int err)
1749{
1750 struct ahash_request *areq = context;
1751 struct talitos_edesc *edesc =
1752 container_of(desc, struct talitos_edesc, desc);
1753 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1754
1755 if (!req_ctx->last && req_ctx->to_hash_later) {
1756 /* Position any partial block for next update/final/finup */
1757 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1758 req_ctx->nbuf = req_ctx->to_hash_later;
1759 }
1760 common_nonsnoop_hash_unmap(dev, edesc, areq);
1761
1762 kfree(edesc);
1763
1764 areq->base.complete(&areq->base, err);
1765}
1766
1767/*
1768 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1769 * ourself and submit a padded block
1770 */
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	/* pre-padded empty message: 0x80 terminator, zero fill, zero length */
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	/* h/w must not pad again on top of our manual padding */
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1787
/*
 * Fill in and submit the descriptor for one ahash request.  On SEC1, when
 * both previously buffered bytes and fresh data must be hashed, a second
 * chained descriptor is built in the same allocation (see the next_desc
 * block below).  On submit failure the mappings are torn down here.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	/* SEC1: previously buffered bytes go via ptr[3]/desc2, not 'length' */
	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		/* SEC1 cannot walk scatterlists: bounce src into edesc->buf */
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		/* hash the buffered partial block first */
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	/* SEC1 cannot hash an empty message: substitute a pre-padded block */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		/*
		 * both buffered bytes and new data: chain a second
		 * descriptor that continues (no INIT) with the fresh data
		 */
		struct talitos_desc *desc2 = (struct talitos_desc *)
					     (edesc->buf + edesc->dma_len);
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		/* first desc: no pad, continue, and don't notify completion */
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			/* first desc still saves the context between the two */
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	/* flush the CPU-written link table before the h/w reads it */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1913
1914static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1915 unsigned int nbytes)
1916{
1917 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1918 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1919 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1920 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1921 bool is_sec1 = has_ftr_sec1(priv);
1922
1923 if (is_sec1)
1924 nbytes -= req_ctx->nbuf;
1925
1926 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1927 nbytes, 0, 0, 0, areq->base.flags, false);
1928}
1929
/*
 * Initialize the per-request hash state.  Always returns 0.
 */
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	unsigned int size;
	dma_addr_t dma;

	/* Initialize the context */
	req_ctx->buf_idx = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	/* context size depends on digest width (<=sha256 vs sha384/512) */
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;

	/*
	 * NOTE(review): this map/unmap round-trip appears to exist only to
	 * sync hw_context for the later _nosync mappings in
	 * common_nonsnoop_hash() — confirm against the DMA API docs.
	 */
	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}
1955
1956/*
1957 * on h/w without explicit sha224 support, we initialize h/w context
1958 * manually with sha224 constants, and tell it to run sha256.
1959 */
1960static int ahash_init_sha224_swinit(struct ahash_request *areq)
1961{
1962 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1963
1964 req_ctx->hw_context[0] = SHA224_H0;
1965 req_ctx->hw_context[1] = SHA224_H1;
1966 req_ctx->hw_context[2] = SHA224_H2;
1967 req_ctx->hw_context[3] = SHA224_H3;
1968 req_ctx->hw_context[4] = SHA224_H4;
1969 req_ctx->hw_context[5] = SHA224_H5;
1970 req_ctx->hw_context[6] = SHA224_H6;
1971 req_ctx->hw_context[7] = SHA224_H7;
1972
1973 /* init 64-bit count */
1974 req_ctx->hw_context[8] = 0;
1975 req_ctx->hw_context[9] = 0;
1976
1977 ahash_init(areq);
1978 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1979
1980 return 0;
1981}
1982
/*
 * Core worker for update/final/finup/digest.
 *
 * Buffers data until at least one full block is available, then submits
 * whole blocks to the SEC, keeping any trailing partial block (or one
 * full block, to guarantee final has data to pad) in the request
 * context buffer for the next call.
 *
 * Returns 0 when the data was merely buffered, a negative errno on
 * failure, or the (asynchronous) result of common_nonsnoop_hash().
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	/* active buffer; buf_idx selects one of two ping-pong buffers */
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;	/* final: hash everything, SEC pads */
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		/* SEC2+: prepend the buffer via a chained scatterlist */
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		/*
		 * SEC1: top up the buffer to a block boundary (or to the
		 * total length), then hash the remainder straight from src.
		 */
		int offset;

		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
						 offset);
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		/* stash the tail in the *other* ping-pong buffer */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
}
2094
2095static int ahash_update(struct ahash_request *areq)
2096{
2097 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2098
2099 req_ctx->last = 0;
2100
2101 return ahash_process_req(areq, areq->nbytes);
2102}
2103
2104static int ahash_final(struct ahash_request *areq)
2105{
2106 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2107
2108 req_ctx->last = 1;
2109
2110 return ahash_process_req(areq, 0);
2111}
2112
2113static int ahash_finup(struct ahash_request *areq)
2114{
2115 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2116
2117 req_ctx->last = 1;
2118
2119 return ahash_process_req(areq, areq->nbytes);
2120}
2121
/* .digest: one-shot init + update + final over the whole request. */
static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/*
	 * Reset the request context first; 'last' must be set afterwards
	 * since init presumably reinitializes req_ctx — do not reorder.
	 */
	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
2132
/*
 * .export: snapshot the in-progress hash state (hw context, buffered
 * partial block and bookkeeping flags) into a talitos_export_state.
 */
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	/*
	 * Map/unmap round-trip with DMA_FROM_DEVICE so the hw_context
	 * last written by the SEC becomes visible to the CPU before the
	 * memcpy below.
	 */
	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	/* only the currently-active ping-pong buffer holds valid data */
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}
2157
/*
 * .import: rebuild the request context from a previously exported
 * talitos_export_state, then push the restored hw context back to the
 * device.
 */
static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	/* context size follows the MDEU unit: A (<= sha256) vs B (384/512) */
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	/* buf_idx was zeroed above, so buffer 0 is the active one */
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	/*
	 * Map/unmap round-trip with DMA_TO_DEVICE to flush the restored
	 * hw_context out to the device.
	 */
	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}
2187
2188static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2189 u8 *hash)
2190{
2191 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2192
2193 struct scatterlist sg[1];
2194 struct ahash_request *req;
2195 struct crypto_wait wait;
2196 int ret;
2197
2198 crypto_init_wait(&wait);
2199
2200 req = ahash_request_alloc(tfm, GFP_KERNEL);
2201 if (!req)
2202 return -ENOMEM;
2203
2204 /* Keep tfm keylen == 0 during hash of the long key */
2205 ctx->keylen = 0;
2206 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2207 crypto_req_done, &wait);
2208
2209 sg_init_one(&sg[0], key, keylen);
2210
2211 ahash_request_set_crypt(req, sg, hash, keylen);
2212 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2213
2214 ahash_request_free(req);
2215
2216 return ret;
2217}
2218
2219static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2220 unsigned int keylen)
2221{
2222 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2223 struct device *dev = ctx->dev;
2224 unsigned int blocksize =
2225 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2226 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2227 unsigned int keysize = keylen;
2228 u8 hash[SHA512_DIGEST_SIZE];
2229 int ret;
2230
2231 if (keylen <= blocksize)
2232 memcpy(ctx->key, key, keysize);
2233 else {
2234 /* Must get the hash of the long key */
2235 ret = keyhash(tfm, key, keylen, hash);
2236
2237 if (ret) {
2238 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2239 return -EINVAL;
2240 }
2241
2242 keysize = digestsize;
2243 memcpy(ctx->key, hash, digestsize);
2244 }
2245
2246 if (ctx->keylen)
2247 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2248
2249 ctx->keylen = keysize;
2250 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2251
2252 return 0;
2253}
2254
2255
/*
 * Static description of one algorithm offered by this driver; copied
 * into a runtime talitos_crypto_alg at registration time.
 */
struct talitos_alg_template {
	u32 type;	/* CRYPTO_ALG_TYPE_{AEAD,ABLKCIPHER,AHASH} */
	u32 priority;	/* used by -hsna variants; presumably overrides
			 * the default cra_priority — confirm at the
			 * registration site (outside this view) */
	union {		/* crypto API view, selected by @type */
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;	/* SEC descriptor header for this alg */
};
2266
/*
 * Table of every algorithm this driver can offer; entries whose
 * descriptor type or execution units the probed hardware lacks are
 * presumably filtered via hw_supports() at registration time.
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor.
	 * The "-hsna" duplicates use the two-pass HMAC_SNOOP_NO_AFEU
	 * descriptor type instead, with a distinct priority.
	 */
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-aes-talitos-hsna",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-3des-talitos-hsna",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-aes-talitos-hsna",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-3des-talitos-hsna",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-aes-talitos-hsna",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-3des-talitos-hsna",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	/* sha384/sha512 need MDEU-B; no -hsna duplicates for these */
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-aes-talitos-hsna",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
	  .alg.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-3des-talitos-hsna",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		},
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
		.setkey = aead_des3_setkey,
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablkcipher_aes_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablkcipher_aes_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-talitos",
		.cra_blocksize = 1,	/* CTR is a stream mode */
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablkcipher_aes_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CTR,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "ecb-des-talitos",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = ablkcipher_des_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "cbc-des-talitos",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = ablkcipher_des_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.setkey = ablkcipher_des3_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_3DES,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = ablkcipher_des3_setkey,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "md5",
			.cra_driver_name = "md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha224",
			.cra_driver_name = "sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha384",
			.cra_driver_name = "sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "sha512",
			.cra_driver_name = "sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	},
	/* HMAC variants share the plain-hash descriptor templates;
	 * the HMAC mode bit is set per-request in ahash_process_req().
	 */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "hmac-md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "hmac-sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha224)",
			.cra_driver_name = "hmac-sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "hmac-sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha384)",
			.cra_driver_name = "hmac-sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.statesize = sizeof(struct talitos_export_state),
		.halg.base = {
			.cra_name = "hmac(sha512)",
			.cra_driver_name = "hmac-sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
3010
/* Runtime record for one registered algorithm. */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in talitos_private alg_list */
	struct device *dev;		/* SEC device providing this alg */
	struct talitos_alg_template algt;	/* per-device copy of template */
};
3016
3017static int talitos_init_common(struct talitos_ctx *ctx,
3018 struct talitos_crypto_alg *talitos_alg)
3019{
3020 struct talitos_private *priv;
3021
3022 /* update context with ptr to dev */
3023 ctx->dev = talitos_alg->dev;
3024
3025 /* assign SEC channel to tfm in round-robin fashion */
3026 priv = dev_get_drvdata(ctx->dev);
3027 ctx->ch = atomic_inc_return(&priv->last_chan) &
3028 (priv->num_channels - 1);
3029
3030 /* copy descriptor header template value */
3031 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3032
3033 /* select done notification */
3034 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3035
3036 return 0;
3037}
3038
3039static int talitos_cra_init(struct crypto_tfm *tfm)
3040{
3041 struct crypto_alg *alg = tfm->__crt_alg;
3042 struct talitos_crypto_alg *talitos_alg;
3043 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3044
3045 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3046 talitos_alg = container_of(__crypto_ahash_alg(alg),
3047 struct talitos_crypto_alg,
3048 algt.alg.hash);
3049 else
3050 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3051 algt.alg.crypto);
3052
3053 return talitos_init_common(ctx, talitos_alg);
3054}
3055
3056static int talitos_cra_init_aead(struct crypto_aead *tfm)
3057{
3058 struct aead_alg *alg = crypto_aead_alg(tfm);
3059 struct talitos_crypto_alg *talitos_alg;
3060 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3061
3062 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3063 algt.alg.aead);
3064
3065 return talitos_init_common(ctx, talitos_alg);
3066}
3067
3068static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3069{
3070 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3071
3072 talitos_cra_init(tfm);
3073
3074 ctx->keylen = 0;
3075 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3076 sizeof(struct talitos_ahash_req_ctx));
3077
3078 return 0;
3079}
3080
3081static void talitos_cra_exit(struct crypto_tfm *tfm)
3082{
3083 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3084 struct device *dev = ctx->dev;
3085
3086 if (ctx->keylen)
3087 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3088}
3089
3090/*
3091 * given the alg's descriptor header template, determine whether descriptor
3092 * type and primary/secondary execution units required match the hw
3093 * capabilities description provided in the device tree node.
3094 */
3095static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3096{
3097 struct talitos_private *priv = dev_get_drvdata(dev);
3098 int ret;
3099
3100 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3101 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3102
3103 if (SECONDARY_EU(desc_hdr_template))
3104 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3105 & priv->exec_units);
3106
3107 return ret;
3108}
3109
3110static int talitos_remove(struct platform_device *ofdev)
3111{
3112 struct device *dev = &ofdev->dev;
3113 struct talitos_private *priv = dev_get_drvdata(dev);
3114 struct talitos_crypto_alg *t_alg, *n;
3115 int i;
3116
3117 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3118 switch (t_alg->algt.type) {
3119 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3120 break;
3121 case CRYPTO_ALG_TYPE_AEAD:
3122 crypto_unregister_aead(&t_alg->algt.alg.aead);
3123 break;
3124 case CRYPTO_ALG_TYPE_AHASH:
3125 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3126 break;
3127 }
3128 list_del(&t_alg->entry);
3129 }
3130
3131 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3132 talitos_unregister_rng(dev);
3133
3134 for (i = 0; i < 2; i++)
3135 if (priv->irq[i]) {
3136 free_irq(priv->irq[i], dev);
3137 irq_dispose_mapping(priv->irq[i]);
3138 }
3139
3140 tasklet_kill(&priv->done_task[0]);
3141 if (priv->irq[1])
3142 tasklet_kill(&priv->done_task[1]);
3143
3144 return 0;
3145}
3146
/*
 * talitos_alg_alloc - instantiate one algorithm from a driver template
 * @dev: the SEC device that will service the algorithm
 * @template: entry of the driver_algs[] table
 *
 * Copies the template into a devm-allocated talitos_crypto_alg and wires
 * up the type-specific operations.  Returns the new object on success,
 * ERR_PTR(-ENOTSUPP) when the hardware lacks a feature the algorithm
 * needs (caller skips it), or another ERR_PTR on failure.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							    *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		/* honor a setkey supplied by the template, else default */
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
					     ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		/* honor a setkey supplied by the template, else default */
		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
					      aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		/* sha224-based authenc requires h/w sha224 init support */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		/* only keyed (hmac) hashes get a setkey operation */
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		/* hmac hashes need the HMAC_OK hardware feature */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * Without h/w sha224 init, run sha224 in the SHA-256 mode
		 * of the MDEU with a s/w-provided initial state
		 * (ahash_init_sha224_swinit).
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	/* template priority wins over the driver-wide default */
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	/* alignmask 3 = request 4-byte-aligned buffers on SEC1 parts */
	if (has_ftr_sec1(priv))
		alg->cra_alignmask = 3;
	else
		alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
3238
/*
 * talitos_probe_irq - map and request the device interrupt line(s)
 * @ofdev: the platform device being probed
 *
 * SEC1 and single-irq SEC2 parts service all four channels from one
 * line; dual-irq parts split channels 0/2 and 1/3 across two lines.
 * On failure the corresponding irq mapping is disposed, priv->irq[] is
 * cleared, and the request_irq() error is returned.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		/* SEC1: one line handles all four channels */
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		/* no second line in the DT: primary covers all channels */
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	/* dual-irq layout: primary line covers channels 0 and 2 */
	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
3292
/*
 * talitos_probe - bring up a SEC device
 * @ofdev: the platform device to probe
 *
 * Maps the register block, reads capability properties from the device
 * tree, requests interrupts, sets up per-channel state, resets and
 * initializes the hardware, then registers the hwrng (if present) and
 * every algorithm the hardware supports.  On error, err_out unwinds
 * through talitos_remove(); allocations are devm-managed.
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	/*
	 * num_channels must be a power of two: it is used as a mask in
	 * talitos_init_common() for round-robin channel selection.
	 */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	/* feature flags keyed off the compatible string */
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/* per-variant execution-unit register offsets and channel stride */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/* pick completion tasklets matching the SEC version / irq layout */
	if (has_ftr_sec1(priv)) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		/*
		 * With a single irq every channel gets the base offset;
		 * with two irqs, only even-numbered channels do.
		 */
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						  priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/*
		 * NOTE(review): the negative bias appears to reserve
		 * chfifo_len - 1 outstanding-request slots per channel —
		 * confirm against the submit path.
		 */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/* NOTE(review): dma_set_mask() return value is ignored here */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP: alg not usable on this h/w */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				/* registration failure is not fatal: skip */
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
3505
/*
 * Device-tree match table.  Only base compatibles are listed; the
 * finer-grained variants ("fsl,sec1.2", "fsl,sec2.1", "fsl,sec3.0")
 * are distinguished inside talitos_probe() via of_device_is_compatible().
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
3520
/* Platform driver glue: probe/remove wired to the OF match table above. */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

/* registers the driver at module init, unregisters at module exit */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");