/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

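/*
 * Layout sketch, inferred from the accessors above rather than from the
 * SEC reference manual: SEC1 descriptor pointers carry a 32-bit bus
 * address plus a 16-bit length in ->len1, while SEC2/3 pointers add an
 * extension byte (->eptr) holding the upper bits of a 36-bit bus address
 * and use ->len instead.  For example, a 36-bit address 0x9_2345_6780 of
 * length 64 would land on SEC2 as eptr = 0x9, ptr = 0x23456780, len = 64.
 */
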
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset.
	 * The errata documentation warns that certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit once;
	 * set the bit twice to completely reset the device.
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

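/*
 * Submission protocol notes (describing the code above): the per-channel
 * software FIFO is a power-of-two ring; head (producer) is guarded by
 * head_lock and tail (consumer, see flush_channel()) by tail_lock.
 * submit_count is biased at channel init (elsewhere in this driver) so
 * that it reaches zero exactly when the hardware fetch FIFO is full,
 * letting atomic_inc_not_zero() double as the "queue full" test;
 * flush_channel() releases a slot with atomic_dec().  The smp_wmb()
 * orders the request fields before ->desc, which flush_channel() treats
 * as the slot-valid flag, and the wmb() orders everything before the
 * FF/FF_LO write that hands the descriptor's bus address to the channel.
 */
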
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process completed requests; notify the callback of any error
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

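/*
 * Note on the completion walk above: the tail lock is dropped around the
 * callback so that completion handlers may resubmit work or re-enter the
 * crypto layer without holding a channel lock.  The request is copied
 * into saved_req first because clearing ->desc publishes the ring slot
 * back to talitos_submit().
 */
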
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

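/*
 * Sketch of the lookup above: CDPR/CDPR_LO hold the bus address of the
 * descriptor the channel was fetching when the error hit, so the
 * in-flight ring is walked from tail until a slot's dma_desc (or, for
 * SEC1 chained hash requests, its desc->next_desc) matches; that
 * descriptor's header then tells report_eu_error() which execution
 * unit's status registers to dump.
 */
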
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

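/*
 * Key layout sketch for the authenc() key installed above: the auth
 * (HMAC) key and the cipher key are stored back to back in ctx->key and
 * DMA-mapped once, so descriptors can point the MDEU at offset 0 with
 * length authkeylen and the cipher EU at offset authkeylen with length
 * enckeylen:
 *
 *	ctx->key: [ authkey (authkeylen) | enckey (enckeylen) ]
 *	            ^ dma_key              ^ dma_key + authkeylen
 */
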
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
				     authsize, areq->assoclen + areq->cryptlen);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		char icvdata[SHA512_DIGEST_SIZE];
		int nents = edesc->dst_nents ? : 1;
		unsigned int len = req->assoclen + req->cryptlen;

		/* auth check */
		if (nents > 1) {
			sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
					   len - authsize);
			icv = icvdata;
		} else {
			icv = (char *)sg_virt(req->dst) + len - authsize;
		}

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}

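/*
 * Worked example of the conversion above (values are illustrative):
 * given three DMA segments of 16, 64 and 64 bytes, offset = 24 and
 * cryptlen = 100, the first segment is skipped entirely (24 >= 16,
 * leaving offset = 8), so the table ends up with two entries:
 *
 *	entry 0: addr = seg1 + 8, len = 56
 *	entry 1: addr = seg2,     len = 44, ext = DESC_PTR_LNKTBL_RETURN
 *
 * i.e. offset consumes leading bytes, cryptlen caps the total, and the
 * last entry is tagged so the hardware knows where the table ends.
 */
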
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC appended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}

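/*
 * Pointer map used by ipsec_esp() above, summarized from the
 * assignments in the function (ptr[2] and ptr[3] trade places for the
 * HMAC_SNOOP_NO_AFEU descriptor type):
 *
 *	ptr[0]: HMAC key	ptr[4]: cipher in
 *	ptr[1]: AAD (hmac data)	ptr[5]: cipher out
 *	ptr[2]: cipher IV	ptr[6]: ICV out / IV out
 *	ptr[3]: cipher key
 */
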
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

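/*
 * Rough allocation layout produced above for the SEC2+ AEAD case, as a
 * sketch:
 *
 *	[ struct talitos_edesc | link_tbl: (src_nents + dst_nents + 2)
 *	  entries | 2 * authsize of ICV scratch | ivsize IV copy ]
 *
 * On SEC1 the link-table area is instead a flat bounce buffer of
 * src_len + dst_len bytes (edesc->buf), and ahash requests reserve room
 * there for a second, chained descriptor.  The whole tail is mapped
 * once as edesc->dma_link_tbl.
 */
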
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return ablkcipher_setkey(cipher, key, keylen);

	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return -EINVAL;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	memcpy(areq->info, ctx->iv, ivsize);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

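/*
 * Pointer map used by common_nonsnoop() above, summarized from the
 * assignments in the function:
 *
 *	ptr[0]: unused		ptr[3]: cipher in
 *	ptr[1]: cipher IV	ptr[4]: cipher out
 *	ptr[2]: cipher key	ptr[5]: IV out (chaining value)
 *				ptr[6]: unused
 */
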
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

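/*
 * The canned block above is the Merkle-Damgard padding of an empty
 * message for the 512-bit-block MD5/SHA-1/SHA-2 family: a single 0x80
 * byte, zero fill, and a zero 64-bit length field.  Hashing it with the
 * hardware's automatic padding disabled (MDEU_PAD cleared) therefore
 * yields the digest of the empty message.
 */
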
1805static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1806 struct ahash_request *areq, unsigned int length,
1807 void (*callback) (struct device *dev,
1808 struct talitos_desc *desc,
1809 void *context, int error))
1810{
1811 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1812 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1813 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1814 struct device *dev = ctx->dev;
1815 struct talitos_desc *desc = &edesc->desc;
1816 int ret;
1817 bool sync_needed = false;
1818 struct talitos_private *priv = dev_get_drvdata(dev);
1819 bool is_sec1 = has_ftr_sec1(priv);
1820 int sg_count;
1821
1822 /* first DWORD empty */
1823
1824 /* hash context in */
1825 if (!req_ctx->first || req_ctx->swinit) {
1826 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1827 req_ctx->hw_context_size,
1828 req_ctx->hw_context,
1829 DMA_TO_DEVICE);
1830 req_ctx->swinit = 0;
1831 }
1832 /* Indicate next op is not the first. */
1833 req_ctx->first = 0;
1834
1835 /* HMAC key */
1836 if (ctx->keylen)
1837 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1838 is_sec1);
1839
1840 if (is_sec1 && req_ctx->nbuf)
1841 length -= req_ctx->nbuf;
1842
1843 sg_count = edesc->src_nents ?: 1;
1844 if (is_sec1 && sg_count > 1)
1845 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1846 else if (length)
1847 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1848 DMA_TO_DEVICE);
1849	/* data in */
1852 if (is_sec1 && req_ctx->nbuf) {
1853 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1854 req_ctx->buf[req_ctx->buf_idx],
1855 DMA_TO_DEVICE);
1856 } else {
1857 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1858 &desc->ptr[3], sg_count, 0, 0);
1859 if (sg_count > 1)
1860 sync_needed = true;
1861 }
1862
1863 /* fifth DWORD empty */
1864
1865 /* hash/HMAC out -or- hash context out */
1866 if (req_ctx->last)
1867 map_single_talitos_ptr(dev, &desc->ptr[5],
1868 crypto_ahash_digestsize(tfm),
1869 areq->result, DMA_FROM_DEVICE);
1870 else
1871 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1872 req_ctx->hw_context_size,
1873 req_ctx->hw_context,
1874 DMA_FROM_DEVICE);
1875
1876 /* last DWORD empty */
1877
1878 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1879 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1880
1881 if (is_sec1 && req_ctx->nbuf && length) {
1882 struct talitos_desc *desc2 = (struct talitos_desc *)
1883 (edesc->buf + edesc->dma_len);
1884 dma_addr_t next_desc;
1885
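		/*
		 * SEC1 can't take the buffered partial block and the new
		 * scatterlist data in a single descriptor, so chain a second
		 * one: this descriptor hashes req_ctx->buf (continue, no pad,
		 * no done notification) and desc2, linked via next_desc,
		 * hashes psrc and keeps the PAD/notify bits.
		 */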
1886 memset(desc2, 0, sizeof(*desc2));
1887 desc2->hdr = desc->hdr;
1888 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1889 desc2->hdr1 = desc2->hdr;
1890 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1891 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1892 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1893
1894 if (desc->ptr[1].ptr)
1895 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1896 is_sec1);
1897 else
1898 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1899 req_ctx->hw_context_size,
1900 req_ctx->hw_context,
1901 DMA_TO_DEVICE);
1902 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1903 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1904 &desc2->ptr[3], sg_count, 0, 0);
1905 if (sg_count > 1)
1906 sync_needed = true;
1907 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1908 if (req_ctx->last)
1909 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1910 req_ctx->hw_context_size,
1911 req_ctx->hw_context,
1912 DMA_FROM_DEVICE);
1913
1914 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1915 DMA_BIDIRECTIONAL);
1916 desc->next_desc = cpu_to_be32(next_desc);
1917 }
1918
1919 if (sync_needed)
1920 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1921 edesc->dma_len, DMA_BIDIRECTIONAL);
1922
1923 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1924 if (ret != -EINPROGRESS) {
1925 common_nonsnoop_hash_unmap(dev, edesc, areq);
1926 kfree(edesc);
1927 }
1928 return ret;
1929}
1930
1931static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1932 unsigned int nbytes)
1933{
1934 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1935 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1936 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1937 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1938 bool is_sec1 = has_ftr_sec1(priv);
1939
1940 if (is_sec1)
1941 nbytes -= req_ctx->nbuf;
1942
1943 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1944 nbytes, 0, 0, 0, areq->base.flags, false);
1945}
1946
1947static int ahash_init(struct ahash_request *areq)
1948{
1949 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1950 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1951 struct device *dev = ctx->dev;
1952 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1953 unsigned int size;
1954 dma_addr_t dma;
1955
1956 /* Initialize the context */
1957 req_ctx->buf_idx = 0;
1958 req_ctx->nbuf = 0;
1959 req_ctx->first = 1; /* first indicates h/w must init its context */
1960 req_ctx->swinit = 0; /* assume h/w init of context */
1961 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1962 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1963 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1964 req_ctx->hw_context_size = size;
1965
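	/*
	 * Map and immediately unmap hw_context once here so its cachelines
	 * are flushed up front; later requests use the _nosync mapping
	 * helpers which, as the name suggests, skip that CPU sync.
	 */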
1966 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1967 DMA_TO_DEVICE);
1968 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1969
1970 return 0;
1971}
1972
1973/*
1974 * on h/w without explicit sha224 support, we initialize h/w context
1975 * manually with sha224 constants, and tell it to run sha256.
1976 */
1977static int ahash_init_sha224_swinit(struct ahash_request *areq)
1978{
1979 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1980
1981 req_ctx->hw_context[0] = SHA224_H0;
1982 req_ctx->hw_context[1] = SHA224_H1;
1983 req_ctx->hw_context[2] = SHA224_H2;
1984 req_ctx->hw_context[3] = SHA224_H3;
1985 req_ctx->hw_context[4] = SHA224_H4;
1986 req_ctx->hw_context[5] = SHA224_H5;
1987 req_ctx->hw_context[6] = SHA224_H6;
1988 req_ctx->hw_context[7] = SHA224_H7;
1989
1990 /* init 64-bit count */
1991 req_ctx->hw_context[8] = 0;
1992 req_ctx->hw_context[9] = 0;
1993
1994 ahash_init(areq);
1995	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
1996
1997 return 0;
1998}
1999
2000static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2001{
2002 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2003 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2004 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2005 struct talitos_edesc *edesc;
2006 unsigned int blocksize =
2007 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2008 unsigned int nbytes_to_hash;
2009 unsigned int to_hash_later;
2010 unsigned int nsg;
2011 int nents;
2012 struct device *dev = ctx->dev;
2013 struct talitos_private *priv = dev_get_drvdata(dev);
2014 bool is_sec1 = has_ftr_sec1(priv);
2015 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2016
2017 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2018 /* Buffer up to one whole block */
2019 nents = sg_nents_for_len(areq->src, nbytes);
2020 if (nents < 0) {
2021 dev_err(ctx->dev, "Invalid number of src SG.\n");
2022 return nents;
2023 }
2024 sg_copy_to_buffer(areq->src, nents,
2025 ctx_buf + req_ctx->nbuf, nbytes);
2026 req_ctx->nbuf += nbytes;
2027 return 0;
2028 }
2029
2030	/* Hash now: this is the last request, or more than one block is available */
2031 nbytes_to_hash = nbytes + req_ctx->nbuf;
2032 to_hash_later = nbytes_to_hash & (blocksize - 1);
2033
2034	if (req_ctx->last) {
2035		to_hash_later = 0;
2036	} else if (to_hash_later) {
2037		/* There is a partial block. Hash the full block(s) now */
2038		nbytes_to_hash -= to_hash_later;
2039	} else {
2040		/* Keep one block buffered */
2041		nbytes_to_hash -= blocksize;
2042		to_hash_later = blocksize;
2043	}
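	/*
	 * e.g. with blocksize 64, nbuf 10 and nbytes 100: nbytes_to_hash
	 * starts at 110 and to_hash_later is 46, so 64 bytes get hashed now.
	 * On an exact block multiple, one full block is held back instead,
	 * seemingly so a later final() is never left with nothing to pad.
	 */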
2044
2045 /* Chain in any previously buffered data */
2046 if (!is_sec1 && req_ctx->nbuf) {
2047 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2048 sg_init_table(req_ctx->bufsl, nsg);
2049 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2050 if (nsg > 1)
2051 sg_chain(req_ctx->bufsl, 2, areq->src);
2052 req_ctx->psrc = req_ctx->bufsl;
2053 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2054 int offset;
2055
2056 if (nbytes_to_hash > blocksize)
2057 offset = blocksize - req_ctx->nbuf;
2058 else
2059 offset = nbytes_to_hash - req_ctx->nbuf;
2060 nents = sg_nents_for_len(areq->src, offset);
2061 if (nents < 0) {
2062 dev_err(ctx->dev, "Invalid number of src SG.\n");
2063 return nents;
2064 }
2065 sg_copy_to_buffer(areq->src, nents,
2066 ctx_buf + req_ctx->nbuf, offset);
2067 req_ctx->nbuf += offset;
2068 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2069 offset);
2070	} else {
2071		req_ctx->psrc = areq->src;
	}
2072
2073 if (to_hash_later) {
2074 nents = sg_nents_for_len(areq->src, nbytes);
2075 if (nents < 0) {
2076 dev_err(ctx->dev, "Invalid number of src SG.\n");
2077 return nents;
2078 }
2079 sg_pcopy_to_buffer(areq->src, nents,
2080 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2081 to_hash_later,
2082 nbytes - to_hash_later);
2083 }
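	/*
	 * The leftover tail lands in the *other* half of the double buffer;
	 * ahash_done() flips buf_idx over to it once this request completes.
	 */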
2084 req_ctx->to_hash_later = to_hash_later;
2085
2086 /* Allocate extended descriptor */
2087 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2088 if (IS_ERR(edesc))
2089 return PTR_ERR(edesc);
2090
2091 edesc->desc.hdr = ctx->desc_hdr_template;
2092
2093 /* On last one, request SEC to pad; otherwise continue */
2094 if (req_ctx->last)
2095 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2096 else
2097 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2098
2099 /* request SEC to INIT hash. */
2100 if (req_ctx->first && !req_ctx->swinit)
2101 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2102
2103	/*
	 * When the tfm context has a keylen, it's an HMAC.
2104	 * A first or last (i.e. not middle) descriptor must request HMAC.
2105	 */
2106 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2107 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2108
2109 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2110}
2111
2112static int ahash_update(struct ahash_request *areq)
2113{
2114 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2115
2116 req_ctx->last = 0;
2117
2118 return ahash_process_req(areq, areq->nbytes);
2119}
2120
2121static int ahash_final(struct ahash_request *areq)
2122{
2123 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2124
2125 req_ctx->last = 1;
2126
2127 return ahash_process_req(areq, 0);
2128}
2129
2130static int ahash_finup(struct ahash_request *areq)
2131{
2132 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2133
2134 req_ctx->last = 1;
2135
2136 return ahash_process_req(areq, areq->nbytes);
2137}
2138
2139static int ahash_digest(struct ahash_request *areq)
2140{
2141 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2142 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2143
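	/*
	 * digest = init + a single last pass over the whole request; both
	 * init entry points in this driver always return 0, so the return
	 * value is ignored.
	 */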
2144 ahash->init(areq);
2145 req_ctx->last = 1;
2146
2147 return ahash_process_req(areq, areq->nbytes);
2148}
2149
2150static int ahash_export(struct ahash_request *areq, void *out)
2151{
2152 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2153 struct talitos_export_state *export = out;
2154 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2155 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2156 struct device *dev = ctx->dev;
2157 dma_addr_t dma;
2158
2159 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2160 DMA_FROM_DEVICE);
2161 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2162
2163 memcpy(export->hw_context, req_ctx->hw_context,
2164 req_ctx->hw_context_size);
2165 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2166 export->swinit = req_ctx->swinit;
2167 export->first = req_ctx->first;
2168 export->last = req_ctx->last;
2169 export->to_hash_later = req_ctx->to_hash_later;
2170 export->nbuf = req_ctx->nbuf;
2171
2172 return 0;
2173}
2174
2175static int ahash_import(struct ahash_request *areq, const void *in)
2176{
2177 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2178 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2179 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2180 struct device *dev = ctx->dev;
2181 const struct talitos_export_state *export = in;
2182 unsigned int size;
2183 dma_addr_t dma;
2184
2185 memset(req_ctx, 0, sizeof(*req_ctx));
2186 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2187 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2188 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2189 req_ctx->hw_context_size = size;
2190 memcpy(req_ctx->hw_context, export->hw_context, size);
2191 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2192 req_ctx->swinit = export->swinit;
2193 req_ctx->first = export->first;
2194 req_ctx->last = export->last;
2195 req_ctx->to_hash_later = export->to_hash_later;
2196 req_ctx->nbuf = export->nbuf;
2197
2198 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2199 DMA_TO_DEVICE);
2200 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2201
2202 return 0;
2203}
2204
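/*
 * Synchronously digest a key longer than the block size down to digestsize,
 * as HMAC (RFC 2104) requires, by feeding it back through the crypto API.
 */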
2205static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2206 u8 *hash)
2207{
2208 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2210	struct scatterlist sg[1];
2211 struct ahash_request *req;
2212 struct crypto_wait wait;
2213 int ret;
2214
2215 crypto_init_wait(&wait);
2216
2217 req = ahash_request_alloc(tfm, GFP_KERNEL);
2218 if (!req)
2219 return -ENOMEM;
2220
2221 /* Keep tfm keylen == 0 during hash of the long key */
2222 ctx->keylen = 0;
2223 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2224 crypto_req_done, &wait);
2225
2226 sg_init_one(&sg[0], key, keylen);
2227
2228 ahash_request_set_crypt(req, sg, hash, keylen);
2229 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2230
2231 ahash_request_free(req);
2232
2233 return ret;
2234}
2235
2236static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2237 unsigned int keylen)
2238{
2239 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2240 struct device *dev = ctx->dev;
2241 unsigned int blocksize =
2242 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2243 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2244 unsigned int keysize = keylen;
2245 u8 hash[SHA512_DIGEST_SIZE];
2246 int ret;
2247
2248	if (keylen <= blocksize) {
2249		memcpy(ctx->key, key, keysize);
2250	} else {
2251 /* Must get the hash of the long key */
2252 ret = keyhash(tfm, key, keylen, hash);
2253
2254 if (ret) {
2255 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2256 return -EINVAL;
2257 }
2258
2259 keysize = digestsize;
2260 memcpy(ctx->key, hash, digestsize);
2261 }
2262
2263 if (ctx->keylen)
2264 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2265
2266 ctx->keylen = keysize;
2267 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2268
2269 return 0;
2270}
2271
2273struct talitos_alg_template {
2274 u32 type;
2275 u32 priority;
2276 union {
2277 struct crypto_alg crypto;
2278 struct ahash_alg hash;
2279 struct aead_alg aead;
2280 } alg;
2281 __be32 desc_hdr_template;
2282};
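/*
 * desc_hdr_template preselects the descriptor type and the execution-unit
 * and mode bits for each algorithm below; hw_supports() later matches these
 * against the capability masks read from the device tree.
 */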
2283
2284static struct talitos_alg_template driver_algs[] = {
2285 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2286 { .type = CRYPTO_ALG_TYPE_AEAD,
2287 .alg.aead = {
2288 .base = {
2289 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2290 .cra_driver_name = "authenc-hmac-sha1-"
2291 "cbc-aes-talitos",
2292 .cra_blocksize = AES_BLOCK_SIZE,
2293 .cra_flags = CRYPTO_ALG_ASYNC,
2294 },
2295 .ivsize = AES_BLOCK_SIZE,
2296 .maxauthsize = SHA1_DIGEST_SIZE,
2297 },
2298 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2299 DESC_HDR_SEL0_AESU |
2300 DESC_HDR_MODE0_AESU_CBC |
2301 DESC_HDR_SEL1_MDEUA |
2302 DESC_HDR_MODE1_MDEU_INIT |
2303 DESC_HDR_MODE1_MDEU_PAD |
2304 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2305 },
2306 { .type = CRYPTO_ALG_TYPE_AEAD,
2307 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2308 .alg.aead = {
2309 .base = {
2310 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2311 .cra_driver_name = "authenc-hmac-sha1-"
2312 "cbc-aes-talitos-hsna",
2313 .cra_blocksize = AES_BLOCK_SIZE,
2314 .cra_flags = CRYPTO_ALG_ASYNC,
2315 },
2316 .ivsize = AES_BLOCK_SIZE,
2317 .maxauthsize = SHA1_DIGEST_SIZE,
2318 },
2319 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2320 DESC_HDR_SEL0_AESU |
2321 DESC_HDR_MODE0_AESU_CBC |
2322 DESC_HDR_SEL1_MDEUA |
2323 DESC_HDR_MODE1_MDEU_INIT |
2324 DESC_HDR_MODE1_MDEU_PAD |
2325 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2326 },
2327 { .type = CRYPTO_ALG_TYPE_AEAD,
2328 .alg.aead = {
2329 .base = {
2330 .cra_name = "authenc(hmac(sha1),"
2331 "cbc(des3_ede))",
2332 .cra_driver_name = "authenc-hmac-sha1-"
2333 "cbc-3des-talitos",
2334 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2335 .cra_flags = CRYPTO_ALG_ASYNC,
2336 },
2337 .ivsize = DES3_EDE_BLOCK_SIZE,
2338 .maxauthsize = SHA1_DIGEST_SIZE,
2339 },
2340 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2341 DESC_HDR_SEL0_DEU |
2342 DESC_HDR_MODE0_DEU_CBC |
2343 DESC_HDR_MODE0_DEU_3DES |
2344 DESC_HDR_SEL1_MDEUA |
2345 DESC_HDR_MODE1_MDEU_INIT |
2346 DESC_HDR_MODE1_MDEU_PAD |
2347 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2348 },
2349 { .type = CRYPTO_ALG_TYPE_AEAD,
2350 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2351 .alg.aead = {
2352 .base = {
2353 .cra_name = "authenc(hmac(sha1),"
2354 "cbc(des3_ede))",
2355 .cra_driver_name = "authenc-hmac-sha1-"
2356 "cbc-3des-talitos-hsna",
2357 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2358 .cra_flags = CRYPTO_ALG_ASYNC,
2359 },
2360 .ivsize = DES3_EDE_BLOCK_SIZE,
2361 .maxauthsize = SHA1_DIGEST_SIZE,
2362 },
2363 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2364 DESC_HDR_SEL0_DEU |
2365 DESC_HDR_MODE0_DEU_CBC |
2366 DESC_HDR_MODE0_DEU_3DES |
2367 DESC_HDR_SEL1_MDEUA |
2368 DESC_HDR_MODE1_MDEU_INIT |
2369 DESC_HDR_MODE1_MDEU_PAD |
2370 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2371 },
2372 { .type = CRYPTO_ALG_TYPE_AEAD,
2373 .alg.aead = {
2374 .base = {
2375 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2376 .cra_driver_name = "authenc-hmac-sha224-"
2377 "cbc-aes-talitos",
2378 .cra_blocksize = AES_BLOCK_SIZE,
2379 .cra_flags = CRYPTO_ALG_ASYNC,
2380 },
2381 .ivsize = AES_BLOCK_SIZE,
2382 .maxauthsize = SHA224_DIGEST_SIZE,
2383 },
2384 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2385 DESC_HDR_SEL0_AESU |
2386 DESC_HDR_MODE0_AESU_CBC |
2387 DESC_HDR_SEL1_MDEUA |
2388 DESC_HDR_MODE1_MDEU_INIT |
2389 DESC_HDR_MODE1_MDEU_PAD |
2390 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2391 },
2392 { .type = CRYPTO_ALG_TYPE_AEAD,
2393 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2394 .alg.aead = {
2395 .base = {
2396 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2397 .cra_driver_name = "authenc-hmac-sha224-"
2398 "cbc-aes-talitos-hsna",
2399 .cra_blocksize = AES_BLOCK_SIZE,
2400 .cra_flags = CRYPTO_ALG_ASYNC,
2401 },
2402 .ivsize = AES_BLOCK_SIZE,
2403 .maxauthsize = SHA224_DIGEST_SIZE,
2404 },
2405 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2406 DESC_HDR_SEL0_AESU |
2407 DESC_HDR_MODE0_AESU_CBC |
2408 DESC_HDR_SEL1_MDEUA |
2409 DESC_HDR_MODE1_MDEU_INIT |
2410 DESC_HDR_MODE1_MDEU_PAD |
2411 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2412 },
2413 { .type = CRYPTO_ALG_TYPE_AEAD,
2414 .alg.aead = {
2415 .base = {
2416 .cra_name = "authenc(hmac(sha224),"
2417 "cbc(des3_ede))",
2418 .cra_driver_name = "authenc-hmac-sha224-"
2419 "cbc-3des-talitos",
2420 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2421 .cra_flags = CRYPTO_ALG_ASYNC,
2422 },
2423 .ivsize = DES3_EDE_BLOCK_SIZE,
2424 .maxauthsize = SHA224_DIGEST_SIZE,
2425 },
2426 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2427 DESC_HDR_SEL0_DEU |
2428 DESC_HDR_MODE0_DEU_CBC |
2429 DESC_HDR_MODE0_DEU_3DES |
2430 DESC_HDR_SEL1_MDEUA |
2431 DESC_HDR_MODE1_MDEU_INIT |
2432 DESC_HDR_MODE1_MDEU_PAD |
2433 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2434 },
2435 { .type = CRYPTO_ALG_TYPE_AEAD,
2436 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2437 .alg.aead = {
2438 .base = {
2439 .cra_name = "authenc(hmac(sha224),"
2440 "cbc(des3_ede))",
2441 .cra_driver_name = "authenc-hmac-sha224-"
2442 "cbc-3des-talitos-hsna",
2443 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2444 .cra_flags = CRYPTO_ALG_ASYNC,
2445 },
2446 .ivsize = DES3_EDE_BLOCK_SIZE,
2447 .maxauthsize = SHA224_DIGEST_SIZE,
2448 },
2449 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2450 DESC_HDR_SEL0_DEU |
2451 DESC_HDR_MODE0_DEU_CBC |
2452 DESC_HDR_MODE0_DEU_3DES |
2453 DESC_HDR_SEL1_MDEUA |
2454 DESC_HDR_MODE1_MDEU_INIT |
2455 DESC_HDR_MODE1_MDEU_PAD |
2456 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2457 },
2458 { .type = CRYPTO_ALG_TYPE_AEAD,
2459 .alg.aead = {
2460 .base = {
2461 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2462 .cra_driver_name = "authenc-hmac-sha256-"
2463 "cbc-aes-talitos",
2464 .cra_blocksize = AES_BLOCK_SIZE,
2465 .cra_flags = CRYPTO_ALG_ASYNC,
2466 },
2467 .ivsize = AES_BLOCK_SIZE,
2468 .maxauthsize = SHA256_DIGEST_SIZE,
2469 },
2470 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2471 DESC_HDR_SEL0_AESU |
2472 DESC_HDR_MODE0_AESU_CBC |
2473 DESC_HDR_SEL1_MDEUA |
2474 DESC_HDR_MODE1_MDEU_INIT |
2475 DESC_HDR_MODE1_MDEU_PAD |
2476 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2477 },
2478 { .type = CRYPTO_ALG_TYPE_AEAD,
2479 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2480 .alg.aead = {
2481 .base = {
2482 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2483 .cra_driver_name = "authenc-hmac-sha256-"
2484 "cbc-aes-talitos-hsna",
2485 .cra_blocksize = AES_BLOCK_SIZE,
2486 .cra_flags = CRYPTO_ALG_ASYNC,
2487 },
2488 .ivsize = AES_BLOCK_SIZE,
2489 .maxauthsize = SHA256_DIGEST_SIZE,
2490 },
2491 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2492 DESC_HDR_SEL0_AESU |
2493 DESC_HDR_MODE0_AESU_CBC |
2494 DESC_HDR_SEL1_MDEUA |
2495 DESC_HDR_MODE1_MDEU_INIT |
2496 DESC_HDR_MODE1_MDEU_PAD |
2497 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2498 },
2499 { .type = CRYPTO_ALG_TYPE_AEAD,
2500 .alg.aead = {
2501 .base = {
2502 .cra_name = "authenc(hmac(sha256),"
2503 "cbc(des3_ede))",
2504 .cra_driver_name = "authenc-hmac-sha256-"
2505 "cbc-3des-talitos",
2506 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2507 .cra_flags = CRYPTO_ALG_ASYNC,
2508 },
2509 .ivsize = DES3_EDE_BLOCK_SIZE,
2510 .maxauthsize = SHA256_DIGEST_SIZE,
2511 },
2512 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2513 DESC_HDR_SEL0_DEU |
2514 DESC_HDR_MODE0_DEU_CBC |
2515 DESC_HDR_MODE0_DEU_3DES |
2516 DESC_HDR_SEL1_MDEUA |
2517 DESC_HDR_MODE1_MDEU_INIT |
2518 DESC_HDR_MODE1_MDEU_PAD |
2519 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2520 },
2521 { .type = CRYPTO_ALG_TYPE_AEAD,
2522 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2523 .alg.aead = {
2524 .base = {
2525 .cra_name = "authenc(hmac(sha256),"
2526 "cbc(des3_ede))",
2527 .cra_driver_name = "authenc-hmac-sha256-"
2528 "cbc-3des-talitos-hsna",
2529 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2530 .cra_flags = CRYPTO_ALG_ASYNC,
2531 },
2532 .ivsize = DES3_EDE_BLOCK_SIZE,
2533 .maxauthsize = SHA256_DIGEST_SIZE,
2534 },
2535 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2536 DESC_HDR_SEL0_DEU |
2537 DESC_HDR_MODE0_DEU_CBC |
2538 DESC_HDR_MODE0_DEU_3DES |
2539 DESC_HDR_SEL1_MDEUA |
2540 DESC_HDR_MODE1_MDEU_INIT |
2541 DESC_HDR_MODE1_MDEU_PAD |
2542 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2543 },
2544 { .type = CRYPTO_ALG_TYPE_AEAD,
2545 .alg.aead = {
2546 .base = {
2547 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2548 .cra_driver_name = "authenc-hmac-sha384-"
2549 "cbc-aes-talitos",
2550 .cra_blocksize = AES_BLOCK_SIZE,
2551 .cra_flags = CRYPTO_ALG_ASYNC,
2552 },
2553 .ivsize = AES_BLOCK_SIZE,
2554 .maxauthsize = SHA384_DIGEST_SIZE,
2555 },
2556 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2557 DESC_HDR_SEL0_AESU |
2558 DESC_HDR_MODE0_AESU_CBC |
2559 DESC_HDR_SEL1_MDEUB |
2560 DESC_HDR_MODE1_MDEU_INIT |
2561 DESC_HDR_MODE1_MDEU_PAD |
2562 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2563 },
2564 { .type = CRYPTO_ALG_TYPE_AEAD,
2565 .alg.aead = {
2566 .base = {
2567 .cra_name = "authenc(hmac(sha384),"
2568 "cbc(des3_ede))",
2569 .cra_driver_name = "authenc-hmac-sha384-"
2570 "cbc-3des-talitos",
2571 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2572 .cra_flags = CRYPTO_ALG_ASYNC,
2573 },
2574 .ivsize = DES3_EDE_BLOCK_SIZE,
2575 .maxauthsize = SHA384_DIGEST_SIZE,
2576 },
2577 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2578 DESC_HDR_SEL0_DEU |
2579 DESC_HDR_MODE0_DEU_CBC |
2580 DESC_HDR_MODE0_DEU_3DES |
2581 DESC_HDR_SEL1_MDEUB |
2582 DESC_HDR_MODE1_MDEU_INIT |
2583 DESC_HDR_MODE1_MDEU_PAD |
2584 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2585 },
2586 { .type = CRYPTO_ALG_TYPE_AEAD,
2587 .alg.aead = {
2588 .base = {
2589 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2590 .cra_driver_name = "authenc-hmac-sha512-"
2591 "cbc-aes-talitos",
2592 .cra_blocksize = AES_BLOCK_SIZE,
2593 .cra_flags = CRYPTO_ALG_ASYNC,
2594 },
2595 .ivsize = AES_BLOCK_SIZE,
2596 .maxauthsize = SHA512_DIGEST_SIZE,
2597 },
2598 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2599 DESC_HDR_SEL0_AESU |
2600 DESC_HDR_MODE0_AESU_CBC |
2601 DESC_HDR_SEL1_MDEUB |
2602 DESC_HDR_MODE1_MDEU_INIT |
2603 DESC_HDR_MODE1_MDEU_PAD |
2604 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2605 },
2606 { .type = CRYPTO_ALG_TYPE_AEAD,
2607 .alg.aead = {
2608 .base = {
2609 .cra_name = "authenc(hmac(sha512),"
2610 "cbc(des3_ede))",
2611 .cra_driver_name = "authenc-hmac-sha512-"
2612 "cbc-3des-talitos",
2613 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2614 .cra_flags = CRYPTO_ALG_ASYNC,
2615 },
2616 .ivsize = DES3_EDE_BLOCK_SIZE,
2617 .maxauthsize = SHA512_DIGEST_SIZE,
2618 },
2619 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2620 DESC_HDR_SEL0_DEU |
2621 DESC_HDR_MODE0_DEU_CBC |
2622 DESC_HDR_MODE0_DEU_3DES |
2623 DESC_HDR_SEL1_MDEUB |
2624 DESC_HDR_MODE1_MDEU_INIT |
2625 DESC_HDR_MODE1_MDEU_PAD |
2626 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2627 },
2628 { .type = CRYPTO_ALG_TYPE_AEAD,
2629 .alg.aead = {
2630 .base = {
2631 .cra_name = "authenc(hmac(md5),cbc(aes))",
2632 .cra_driver_name = "authenc-hmac-md5-"
2633 "cbc-aes-talitos",
2634 .cra_blocksize = AES_BLOCK_SIZE,
2635 .cra_flags = CRYPTO_ALG_ASYNC,
2636 },
2637 .ivsize = AES_BLOCK_SIZE,
2638 .maxauthsize = MD5_DIGEST_SIZE,
2639 },
2640 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2641 DESC_HDR_SEL0_AESU |
2642 DESC_HDR_MODE0_AESU_CBC |
2643 DESC_HDR_SEL1_MDEUA |
2644 DESC_HDR_MODE1_MDEU_INIT |
2645 DESC_HDR_MODE1_MDEU_PAD |
2646 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2647 },
2648 { .type = CRYPTO_ALG_TYPE_AEAD,
2649 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2650 .alg.aead = {
2651 .base = {
2652 .cra_name = "authenc(hmac(md5),cbc(aes))",
2653 .cra_driver_name = "authenc-hmac-md5-"
2654 "cbc-aes-talitos-hsna",
2655 .cra_blocksize = AES_BLOCK_SIZE,
2656 .cra_flags = CRYPTO_ALG_ASYNC,
2657 },
2658 .ivsize = AES_BLOCK_SIZE,
2659 .maxauthsize = MD5_DIGEST_SIZE,
2660 },
2661 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2662 DESC_HDR_SEL0_AESU |
2663 DESC_HDR_MODE0_AESU_CBC |
2664 DESC_HDR_SEL1_MDEUA |
2665 DESC_HDR_MODE1_MDEU_INIT |
2666 DESC_HDR_MODE1_MDEU_PAD |
2667 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2668 },
2669 { .type = CRYPTO_ALG_TYPE_AEAD,
2670 .alg.aead = {
2671 .base = {
2672 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2673 .cra_driver_name = "authenc-hmac-md5-"
2674 "cbc-3des-talitos",
2675 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2676 .cra_flags = CRYPTO_ALG_ASYNC,
2677 },
2678 .ivsize = DES3_EDE_BLOCK_SIZE,
2679 .maxauthsize = MD5_DIGEST_SIZE,
2680 },
2681 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2682 DESC_HDR_SEL0_DEU |
2683 DESC_HDR_MODE0_DEU_CBC |
2684 DESC_HDR_MODE0_DEU_3DES |
2685 DESC_HDR_SEL1_MDEUA |
2686 DESC_HDR_MODE1_MDEU_INIT |
2687 DESC_HDR_MODE1_MDEU_PAD |
2688 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2689 },
2690 { .type = CRYPTO_ALG_TYPE_AEAD,
2691 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2692 .alg.aead = {
2693 .base = {
2694 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2695 .cra_driver_name = "authenc-hmac-md5-"
2696 "cbc-3des-talitos-hsna",
2697 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2698 .cra_flags = CRYPTO_ALG_ASYNC,
2699 },
2700 .ivsize = DES3_EDE_BLOCK_SIZE,
2701 .maxauthsize = MD5_DIGEST_SIZE,
2702 },
2703 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2704 DESC_HDR_SEL0_DEU |
2705 DESC_HDR_MODE0_DEU_CBC |
2706 DESC_HDR_MODE0_DEU_3DES |
2707 DESC_HDR_SEL1_MDEUA |
2708 DESC_HDR_MODE1_MDEU_INIT |
2709 DESC_HDR_MODE1_MDEU_PAD |
2710 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2711 },
2712 /* ABLKCIPHER algorithms. */
2713 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2714 .alg.crypto = {
2715 .cra_name = "ecb(aes)",
2716 .cra_driver_name = "ecb-aes-talitos",
2717 .cra_blocksize = AES_BLOCK_SIZE,
2718 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2719 CRYPTO_ALG_ASYNC,
2720 .cra_ablkcipher = {
2721 .min_keysize = AES_MIN_KEY_SIZE,
2722 .max_keysize = AES_MAX_KEY_SIZE,
2724 }
2725 },
2726 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2727 DESC_HDR_SEL0_AESU,
2728 },
2729 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2730 .alg.crypto = {
2731 .cra_name = "cbc(aes)",
2732 .cra_driver_name = "cbc-aes-talitos",
2733 .cra_blocksize = AES_BLOCK_SIZE,
2734 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2735 CRYPTO_ALG_ASYNC,
2736 .cra_ablkcipher = {
2737 .min_keysize = AES_MIN_KEY_SIZE,
2738 .max_keysize = AES_MAX_KEY_SIZE,
2739 .ivsize = AES_BLOCK_SIZE,
2740 .setkey = ablkcipher_aes_setkey,
2741 }
2742 },
2743 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2744 DESC_HDR_SEL0_AESU |
2745 DESC_HDR_MODE0_AESU_CBC,
2746 },
2747 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2748 .alg.crypto = {
2749 .cra_name = "ctr(aes)",
2750 .cra_driver_name = "ctr-aes-talitos",
2751 .cra_blocksize = 1,
2752 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2753 CRYPTO_ALG_ASYNC,
2754 .cra_ablkcipher = {
2755 .min_keysize = AES_MIN_KEY_SIZE,
2756 .max_keysize = AES_MAX_KEY_SIZE,
2757 .setkey = ablkcipher_aes_setkey,
2758 }
2759 },
2760 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2761 DESC_HDR_SEL0_AESU |
2762 DESC_HDR_MODE0_AESU_CTR,
2763 },
2764 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2765 .alg.crypto = {
2766 .cra_name = "ecb(des)",
2767 .cra_driver_name = "ecb-des-talitos",
2768 .cra_blocksize = DES_BLOCK_SIZE,
2769 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2770 CRYPTO_ALG_ASYNC,
2771 .cra_ablkcipher = {
2772 .min_keysize = DES_KEY_SIZE,
2773 .max_keysize = DES_KEY_SIZE,
2775 }
2776 },
2777 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2778 DESC_HDR_SEL0_DEU,
2779 },
2780 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2781 .alg.crypto = {
2782 .cra_name = "cbc(des)",
2783 .cra_driver_name = "cbc-des-talitos",
2784 .cra_blocksize = DES_BLOCK_SIZE,
2785 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2786 CRYPTO_ALG_ASYNC,
2787 .cra_ablkcipher = {
2788 .min_keysize = DES_KEY_SIZE,
2789 .max_keysize = DES_KEY_SIZE,
2790 .ivsize = DES_BLOCK_SIZE,
2791 }
2792 },
2793 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2794 DESC_HDR_SEL0_DEU |
2795 DESC_HDR_MODE0_DEU_CBC,
2796 },
2797 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2798 .alg.crypto = {
2799 .cra_name = "ecb(des3_ede)",
2800 .cra_driver_name = "ecb-3des-talitos",
2801 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2802 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2803 CRYPTO_ALG_ASYNC,
2804 .cra_ablkcipher = {
2805 .min_keysize = DES3_EDE_KEY_SIZE,
2806 .max_keysize = DES3_EDE_KEY_SIZE,
2808 }
2809 },
2810 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2811 DESC_HDR_SEL0_DEU |
2812 DESC_HDR_MODE0_DEU_3DES,
2813 },
2814 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2815 .alg.crypto = {
2816 .cra_name = "cbc(des3_ede)",
2817 .cra_driver_name = "cbc-3des-talitos",
2818 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2819 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2820 CRYPTO_ALG_ASYNC,
2821 .cra_ablkcipher = {
2822 .min_keysize = DES3_EDE_KEY_SIZE,
2823 .max_keysize = DES3_EDE_KEY_SIZE,
2824 .ivsize = DES3_EDE_BLOCK_SIZE,
2825 }
2826 },
2827 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2828 DESC_HDR_SEL0_DEU |
2829 DESC_HDR_MODE0_DEU_CBC |
2830 DESC_HDR_MODE0_DEU_3DES,
2831 },
2832 /* AHASH algorithms. */
2833 { .type = CRYPTO_ALG_TYPE_AHASH,
2834 .alg.hash = {
2835 .halg.digestsize = MD5_DIGEST_SIZE,
2836 .halg.statesize = sizeof(struct talitos_export_state),
2837 .halg.base = {
2838 .cra_name = "md5",
2839 .cra_driver_name = "md5-talitos",
2840 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2841 .cra_flags = CRYPTO_ALG_ASYNC,
2842 }
2843 },
2844 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2845 DESC_HDR_SEL0_MDEUA |
2846 DESC_HDR_MODE0_MDEU_MD5,
2847 },
2848 { .type = CRYPTO_ALG_TYPE_AHASH,
2849 .alg.hash = {
2850 .halg.digestsize = SHA1_DIGEST_SIZE,
2851 .halg.statesize = sizeof(struct talitos_export_state),
2852 .halg.base = {
2853 .cra_name = "sha1",
2854 .cra_driver_name = "sha1-talitos",
2855 .cra_blocksize = SHA1_BLOCK_SIZE,
2856 .cra_flags = CRYPTO_ALG_ASYNC,
2857 }
2858 },
2859 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2860 DESC_HDR_SEL0_MDEUA |
2861 DESC_HDR_MODE0_MDEU_SHA1,
2862 },
2863 { .type = CRYPTO_ALG_TYPE_AHASH,
2864 .alg.hash = {
2865 .halg.digestsize = SHA224_DIGEST_SIZE,
2866 .halg.statesize = sizeof(struct talitos_export_state),
2867 .halg.base = {
2868 .cra_name = "sha224",
2869 .cra_driver_name = "sha224-talitos",
2870 .cra_blocksize = SHA224_BLOCK_SIZE,
2871 .cra_flags = CRYPTO_ALG_ASYNC,
2872 }
2873 },
2874 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2875 DESC_HDR_SEL0_MDEUA |
2876 DESC_HDR_MODE0_MDEU_SHA224,
2877 },
2878 { .type = CRYPTO_ALG_TYPE_AHASH,
2879 .alg.hash = {
2880 .halg.digestsize = SHA256_DIGEST_SIZE,
2881 .halg.statesize = sizeof(struct talitos_export_state),
2882 .halg.base = {
2883 .cra_name = "sha256",
2884 .cra_driver_name = "sha256-talitos",
2885 .cra_blocksize = SHA256_BLOCK_SIZE,
2886 .cra_flags = CRYPTO_ALG_ASYNC,
2887 }
2888 },
2889 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2890 DESC_HDR_SEL0_MDEUA |
2891 DESC_HDR_MODE0_MDEU_SHA256,
2892 },
2893 { .type = CRYPTO_ALG_TYPE_AHASH,
2894 .alg.hash = {
2895 .halg.digestsize = SHA384_DIGEST_SIZE,
2896 .halg.statesize = sizeof(struct talitos_export_state),
2897 .halg.base = {
2898 .cra_name = "sha384",
2899 .cra_driver_name = "sha384-talitos",
2900 .cra_blocksize = SHA384_BLOCK_SIZE,
2901 .cra_flags = CRYPTO_ALG_ASYNC,
2902 }
2903 },
2904 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2905 DESC_HDR_SEL0_MDEUB |
2906 DESC_HDR_MODE0_MDEUB_SHA384,
2907 },
2908 { .type = CRYPTO_ALG_TYPE_AHASH,
2909 .alg.hash = {
2910 .halg.digestsize = SHA512_DIGEST_SIZE,
2911 .halg.statesize = sizeof(struct talitos_export_state),
2912 .halg.base = {
2913 .cra_name = "sha512",
2914 .cra_driver_name = "sha512-talitos",
2915 .cra_blocksize = SHA512_BLOCK_SIZE,
2916 .cra_flags = CRYPTO_ALG_ASYNC,
2917 }
2918 },
2919 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2920 DESC_HDR_SEL0_MDEUB |
2921 DESC_HDR_MODE0_MDEUB_SHA512,
2922 },
2923 { .type = CRYPTO_ALG_TYPE_AHASH,
2924 .alg.hash = {
2925 .halg.digestsize = MD5_DIGEST_SIZE,
2926 .halg.statesize = sizeof(struct talitos_export_state),
2927 .halg.base = {
2928 .cra_name = "hmac(md5)",
2929 .cra_driver_name = "hmac-md5-talitos",
2930 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2931 .cra_flags = CRYPTO_ALG_ASYNC,
2932 }
2933 },
2934 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2935 DESC_HDR_SEL0_MDEUA |
2936 DESC_HDR_MODE0_MDEU_MD5,
2937 },
2938 { .type = CRYPTO_ALG_TYPE_AHASH,
2939 .alg.hash = {
2940 .halg.digestsize = SHA1_DIGEST_SIZE,
2941 .halg.statesize = sizeof(struct talitos_export_state),
2942 .halg.base = {
2943 .cra_name = "hmac(sha1)",
2944 .cra_driver_name = "hmac-sha1-talitos",
2945 .cra_blocksize = SHA1_BLOCK_SIZE,
2946 .cra_flags = CRYPTO_ALG_ASYNC,
2947 }
2948 },
2949 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2950 DESC_HDR_SEL0_MDEUA |
2951 DESC_HDR_MODE0_MDEU_SHA1,
2952 },
2953 { .type = CRYPTO_ALG_TYPE_AHASH,
2954 .alg.hash = {
2955 .halg.digestsize = SHA224_DIGEST_SIZE,
2956 .halg.statesize = sizeof(struct talitos_export_state),
2957 .halg.base = {
2958 .cra_name = "hmac(sha224)",
2959 .cra_driver_name = "hmac-sha224-talitos",
2960 .cra_blocksize = SHA224_BLOCK_SIZE,
2961 .cra_flags = CRYPTO_ALG_ASYNC,
2962 }
2963 },
2964 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2965 DESC_HDR_SEL0_MDEUA |
2966 DESC_HDR_MODE0_MDEU_SHA224,
2967 },
2968 { .type = CRYPTO_ALG_TYPE_AHASH,
2969 .alg.hash = {
2970 .halg.digestsize = SHA256_DIGEST_SIZE,
2971 .halg.statesize = sizeof(struct talitos_export_state),
2972 .halg.base = {
2973 .cra_name = "hmac(sha256)",
2974 .cra_driver_name = "hmac-sha256-talitos",
2975 .cra_blocksize = SHA256_BLOCK_SIZE,
2976 .cra_flags = CRYPTO_ALG_ASYNC,
2977 }
2978 },
2979 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2980 DESC_HDR_SEL0_MDEUA |
2981 DESC_HDR_MODE0_MDEU_SHA256,
2982 },
2983 { .type = CRYPTO_ALG_TYPE_AHASH,
2984 .alg.hash = {
2985 .halg.digestsize = SHA384_DIGEST_SIZE,
2986 .halg.statesize = sizeof(struct talitos_export_state),
2987 .halg.base = {
2988 .cra_name = "hmac(sha384)",
2989 .cra_driver_name = "hmac-sha384-talitos",
2990 .cra_blocksize = SHA384_BLOCK_SIZE,
2991 .cra_flags = CRYPTO_ALG_ASYNC,
2992 }
2993 },
2994 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2995 DESC_HDR_SEL0_MDEUB |
2996 DESC_HDR_MODE0_MDEUB_SHA384,
2997 },
2998 { .type = CRYPTO_ALG_TYPE_AHASH,
2999 .alg.hash = {
3000 .halg.digestsize = SHA512_DIGEST_SIZE,
3001 .halg.statesize = sizeof(struct talitos_export_state),
3002 .halg.base = {
3003 .cra_name = "hmac(sha512)",
3004 .cra_driver_name = "hmac-sha512-talitos",
3005 .cra_blocksize = SHA512_BLOCK_SIZE,
3006 .cra_flags = CRYPTO_ALG_ASYNC,
3007 }
3008 },
3009 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3010 DESC_HDR_SEL0_MDEUB |
3011 DESC_HDR_MODE0_MDEUB_SHA512,
3012 }
3013};
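/*
 * For illustration only (not part of the driver): once registered, the
 * algorithms above are reached through the regular crypto API. A minimal
 * sketch of hashing a buffer ("data"/"len" assumed to exist, error
 * handling omitted) with this driver's sha256:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-talitos", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */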
3014
3015struct talitos_crypto_alg {
3016 struct list_head entry;
3017 struct device *dev;
3018 struct talitos_alg_template algt;
3019};
3020
3021static int talitos_init_common(struct talitos_ctx *ctx,
3022 struct talitos_crypto_alg *talitos_alg)
3023{
3024 struct talitos_private *priv;
3025
3026 /* update context with ptr to dev */
3027 ctx->dev = talitos_alg->dev;
3028
3029 /* assign SEC channel to tfm in round-robin fashion */
3030 priv = dev_get_drvdata(ctx->dev);
3031 ctx->ch = atomic_inc_return(&priv->last_chan) &
3032 (priv->num_channels - 1);
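	/* the mask works because probe rejects non-power-of-2 num_channels */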
3033
3034 /* copy descriptor header template value */
3035 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3036
3037 /* select done notification */
3038 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3039
3040 return 0;
3041}
3042
3043static int talitos_cra_init(struct crypto_tfm *tfm)
3044{
3045 struct crypto_alg *alg = tfm->__crt_alg;
3046 struct talitos_crypto_alg *talitos_alg;
3047 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3048
3049 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3050 talitos_alg = container_of(__crypto_ahash_alg(alg),
3051 struct talitos_crypto_alg,
3052 algt.alg.hash);
3053 else
3054 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3055 algt.alg.crypto);
3056
3057 return talitos_init_common(ctx, talitos_alg);
3058}
3059
3060static int talitos_cra_init_aead(struct crypto_aead *tfm)
3061{
3062 struct aead_alg *alg = crypto_aead_alg(tfm);
3063 struct talitos_crypto_alg *talitos_alg;
3064 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3065
3066 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3067 algt.alg.aead);
3068
3069 return talitos_init_common(ctx, talitos_alg);
3070}
3071
3072static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3073{
3074 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3075
3076 talitos_cra_init(tfm);
3077
3078 ctx->keylen = 0;
3079 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3080 sizeof(struct talitos_ahash_req_ctx));
3081
3082 return 0;
3083}
3084
3085static void talitos_cra_exit(struct crypto_tfm *tfm)
3086{
3087 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3088 struct device *dev = ctx->dev;
3089
3090 if (ctx->keylen)
3091 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3092}
3093
3094/*
3095 * given the alg's descriptor header template, determine whether the
3096 * descriptor type and the required primary/secondary execution units
3097 * match the h/w capabilities described in the device tree node.
3098 */
3099static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3100{
3101 struct talitos_private *priv = dev_get_drvdata(dev);
3102 int ret;
3103
3104	ret = ((1 << DESC_TYPE(desc_hdr_template)) & priv->desc_types) &&
3105	      ((1 << PRIMARY_EU(desc_hdr_template)) & priv->exec_units);
3106
3107	if (SECONDARY_EU(desc_hdr_template))
3108		ret = ret && ((1 << SECONDARY_EU(desc_hdr_template))
3109			      & priv->exec_units);
3110
3111 return ret;
3112}
3113
3114static int talitos_remove(struct platform_device *ofdev)
3115{
3116 struct device *dev = &ofdev->dev;
3117 struct talitos_private *priv = dev_get_drvdata(dev);
3118 struct talitos_crypto_alg *t_alg, *n;
3119 int i;
3120
3121 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3122 switch (t_alg->algt.type) {
3123 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3124 break;
3125 case CRYPTO_ALG_TYPE_AEAD:
3126 crypto_unregister_aead(&t_alg->algt.alg.aead);
3127 break;
3128 case CRYPTO_ALG_TYPE_AHASH:
3129 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3130 break;
3131 }
3132 list_del(&t_alg->entry);
3133 }
3134
3135 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3136 talitos_unregister_rng(dev);
3137
3138	for (i = 0; i < 2; i++) {
3139		if (priv->irq[i]) {
3140			free_irq(priv->irq[i], dev);
3141			irq_dispose_mapping(priv->irq[i]);
3142		}
	}
3143
3144 tasklet_kill(&priv->done_task[0]);
3145 if (priv->irq[1])
3146 tasklet_kill(&priv->done_task[1]);
3147
3148 return 0;
3149}
3150
3151static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3152 struct talitos_alg_template
3153 *template)
3154{
3155 struct talitos_private *priv = dev_get_drvdata(dev);
3156 struct talitos_crypto_alg *t_alg;
3157 struct crypto_alg *alg;
3158
3159 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3160 GFP_KERNEL);
3161 if (!t_alg)
3162 return ERR_PTR(-ENOMEM);
3163
3164 t_alg->algt = *template;
3165
3166 switch (t_alg->algt.type) {
3167 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3168 alg = &t_alg->algt.alg.crypto;
3169 alg->cra_init = talitos_cra_init;
3170 alg->cra_exit = talitos_cra_exit;
3171 alg->cra_type = &crypto_ablkcipher_type;
3172 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3173 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3174 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3175 alg->cra_ablkcipher.geniv = "eseqiv";
3176 break;
3177 case CRYPTO_ALG_TYPE_AEAD:
3178 alg = &t_alg->algt.alg.aead.base;
3179 alg->cra_exit = talitos_cra_exit;
3180 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3181 t_alg->algt.alg.aead.setkey = aead_setkey;
3182 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3183 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3184 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3185 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3186 devm_kfree(dev, t_alg);
3187 return ERR_PTR(-ENOTSUPP);
3188 }
3189 break;
3190 case CRYPTO_ALG_TYPE_AHASH:
3191 alg = &t_alg->algt.alg.hash.halg.base;
3192 alg->cra_init = talitos_cra_init_ahash;
3193 alg->cra_exit = talitos_cra_exit;
3194 t_alg->algt.alg.hash.init = ahash_init;
3195 t_alg->algt.alg.hash.update = ahash_update;
3196 t_alg->algt.alg.hash.final = ahash_final;
3197 t_alg->algt.alg.hash.finup = ahash_finup;
3198 t_alg->algt.alg.hash.digest = ahash_digest;
3199 if (!strncmp(alg->cra_name, "hmac", 4))
3200 t_alg->algt.alg.hash.setkey = ahash_setkey;
3201 t_alg->algt.alg.hash.import = ahash_import;
3202 t_alg->algt.alg.hash.export = ahash_export;
3203
3204 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3205 !strncmp(alg->cra_name, "hmac", 4)) {
3206 devm_kfree(dev, t_alg);
3207 return ERR_PTR(-ENOTSUPP);
3208 }
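		/*
		 * no native sha224 h/w init: fall back to the sw-init entry
		 * point, which seeds the context with the SHA-224 constants,
		 * and run the MDEU in sha256 mode instead.
		 */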
3209 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3210 (!strcmp(alg->cra_name, "sha224") ||
3211 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3212 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3213 t_alg->algt.desc_hdr_template =
3214 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3215 DESC_HDR_SEL0_MDEUA |
3216 DESC_HDR_MODE0_MDEU_SHA256;
3217 }
3218 break;
3219 default:
3220 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3221 devm_kfree(dev, t_alg);
3222 return ERR_PTR(-EINVAL);
3223 }
3224
3225 alg->cra_module = THIS_MODULE;
3226 if (t_alg->algt.priority)
3227 alg->cra_priority = t_alg->algt.priority;
3228 else
3229 alg->cra_priority = TALITOS_CRA_PRIORITY;
3230 if (has_ftr_sec1(priv))
3231 alg->cra_alignmask = 3;
3232 else
3233 alg->cra_alignmask = 0;
3234 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3235 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3236
3237 t_alg->dev = dev;
3238
3239 return t_alg;
3240}
3241
3242static int talitos_probe_irq(struct platform_device *ofdev)
3243{
3244 struct device *dev = &ofdev->dev;
3245 struct device_node *np = ofdev->dev.of_node;
3246 struct talitos_private *priv = dev_get_drvdata(dev);
3247 int err;
3248 bool is_sec1 = has_ftr_sec1(priv);
3249
3250 priv->irq[0] = irq_of_parse_and_map(np, 0);
3251 if (!priv->irq[0]) {
3252 dev_err(dev, "failed to map irq\n");
3253 return -EINVAL;
3254 }
3255 if (is_sec1) {
3256 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3257 dev_driver_string(dev), dev);
3258 goto primary_out;
3259 }
3260
3261 priv->irq[1] = irq_of_parse_and_map(np, 1);
3262
3263	/* no second irq line: serve all four channels from the primary irq */
3264 if (!priv->irq[1]) {
3265 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3266 dev_driver_string(dev), dev);
3267 goto primary_out;
3268 }
3269
3270 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3271 dev_driver_string(dev), dev);
3272 if (err)
3273 goto primary_out;
3274
3275 /* get the secondary irq line */
3276 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3277 dev_driver_string(dev), dev);
3278 if (err) {
3279 dev_err(dev, "failed to request secondary irq\n");
3280 irq_dispose_mapping(priv->irq[1]);
3281 priv->irq[1] = 0;
3282 }
3283
3284 return err;
3285
3286primary_out:
3287 if (err) {
3288 dev_err(dev, "failed to request primary irq\n");
3289 irq_dispose_mapping(priv->irq[0]);
3290 priv->irq[0] = 0;
3291 }
3292
3293 return err;
3294}
3295
3296static int talitos_probe(struct platform_device *ofdev)
3297{
3298 struct device *dev = &ofdev->dev;
3299 struct device_node *np = ofdev->dev.of_node;
3300 struct talitos_private *priv;
3301 int i, err;
3302 int stride;
3303 struct resource *res;
3304
3305 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3306 if (!priv)
3307 return -ENOMEM;
3308
3309 INIT_LIST_HEAD(&priv->alg_list);
3310
3311 dev_set_drvdata(dev, priv);
3312
3313 priv->ofdev = ofdev;
3314
3315 spin_lock_init(&priv->reg_lock);
3316
3317 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3318 if (!res)
3319 return -ENXIO;
3320 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3321 if (!priv->reg) {
3322		dev_err(dev, "failed to ioremap\n");
3323 err = -ENOMEM;
3324 goto err_out;
3325 }
3326
3327 /* get SEC version capabilities from device tree */
3328 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3329 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3330 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3331 of_property_read_u32(np, "fsl,descriptor-types-mask",
3332 &priv->desc_types);
3333
3334 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3335 !priv->exec_units || !priv->desc_types) {
3336 dev_err(dev, "invalid property data in device tree node\n");
3337 err = -EINVAL;
3338 goto err_out;
3339 }
3340
3341 if (of_device_is_compatible(np, "fsl,sec3.0"))
3342 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3343
3344 if (of_device_is_compatible(np, "fsl,sec2.1"))
3345 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3346 TALITOS_FTR_SHA224_HWINIT |
3347 TALITOS_FTR_HMAC_OK;
3348
3349 if (of_device_is_compatible(np, "fsl,sec1.0"))
3350 priv->features |= TALITOS_FTR_SEC1;
3351
3352 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3353 priv->reg_deu = priv->reg + TALITOS12_DEU;
3354 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3355 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3356 stride = TALITOS1_CH_STRIDE;
3357 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3358 priv->reg_deu = priv->reg + TALITOS10_DEU;
3359 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3360 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3361 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3362 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3363 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3364 stride = TALITOS1_CH_STRIDE;
3365 } else {
3366 priv->reg_deu = priv->reg + TALITOS2_DEU;
3367 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3368 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3369 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3370 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3371 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3372 priv->reg_keu = priv->reg + TALITOS2_KEU;
3373 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3374 stride = TALITOS2_CH_STRIDE;
3375 }
3376
3377 err = talitos_probe_irq(ofdev);
3378 if (err)
3379 goto err_out;
3380
3381 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3382 if (priv->num_channels == 1)
3383 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3384 (unsigned long)dev);
3385 else
3386 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3387 (unsigned long)dev);
3388 } else {
3389 if (priv->irq[1]) {
3390 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3391 (unsigned long)dev);
3392 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3393 (unsigned long)dev);
3394 } else if (priv->num_channels == 1) {
3395 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3396 (unsigned long)dev);
3397 } else {
3398 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3399 (unsigned long)dev);
3400 }
3401 }
3402
3403 priv->chan = devm_kcalloc(dev,
3404 priv->num_channels,
3405 sizeof(struct talitos_channel),
3406 GFP_KERNEL);
3407 if (!priv->chan) {
3408 dev_err(dev, "failed to allocate channel management space\n");
3409 err = -ENOMEM;
3410 goto err_out;
3411 }
3412
3413 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3414
3415 for (i = 0; i < priv->num_channels; i++) {
3416 priv->chan[i].reg = priv->reg + stride * (i + 1);
3417 if (!priv->irq[1] || !(i & 1))
3418 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3419
3420 spin_lock_init(&priv->chan[i].head_lock);
3421 spin_lock_init(&priv->chan[i].tail_lock);
3422
3423 priv->chan[i].fifo = devm_kcalloc(dev,
3424 priv->fifo_len,
3425 sizeof(struct talitos_request),
3426 GFP_KERNEL);
3427 if (!priv->chan[i].fifo) {
3428 dev_err(dev, "failed to allocate request fifo %d\n", i);
3429 err = -ENOMEM;
3430 goto err_out;
3431 }
3432
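		/*
		 * flow-control seed: starting at -(chfifo_len - 1) lets the
		 * counter hit zero when the channel fifo fills (assuming
		 * talitos_submit(), defined earlier in this file, increments
		 * it per outstanding request).
		 */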
3433 atomic_set(&priv->chan[i].submit_count,
3434 -(priv->chfifo_len - 1));
3435 }
3436
3437	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err) {
		dev_err(dev, "failed to set DMA mask\n");
		goto err_out;
	}
3438
3439 /* reset and initialize the h/w */
3440 err = init_device(dev);
3441 if (err) {
3442 dev_err(dev, "failed to initialize device\n");
3443 goto err_out;
3444 }
3445
3446 /* register the RNG, if available */
3447 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3448 err = talitos_register_rng(dev);
3449 if (err) {
3450 dev_err(dev, "failed to register hwrng: %d\n", err);
3451 goto err_out;
3452		} else {
3453			dev_info(dev, "hwrng\n");
		}
3454 }
3455
3456 /* register crypto algorithms the device supports */
3457 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3458 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3459 struct talitos_crypto_alg *t_alg;
3460 struct crypto_alg *alg = NULL;
3461
3462 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3463 if (IS_ERR(t_alg)) {
3464 err = PTR_ERR(t_alg);
3465 if (err == -ENOTSUPP)
3466 continue;
3467 goto err_out;
3468 }
3469
3470 switch (t_alg->algt.type) {
3471 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3472 err = crypto_register_alg(
3473 &t_alg->algt.alg.crypto);
3474 alg = &t_alg->algt.alg.crypto;
3475 break;
3476
3477 case CRYPTO_ALG_TYPE_AEAD:
3478 err = crypto_register_aead(
3479 &t_alg->algt.alg.aead);
3480 alg = &t_alg->algt.alg.aead.base;
3481 break;
3482
3483 case CRYPTO_ALG_TYPE_AHASH:
3484 err = crypto_register_ahash(
3485 &t_alg->algt.alg.hash);
3486 alg = &t_alg->algt.alg.hash.halg.base;
3487 break;
3488 }
3489 if (err) {
3490 dev_err(dev, "%s alg registration failed\n",
3491 alg->cra_driver_name);
3492 devm_kfree(dev, t_alg);
3493			} else {
3494				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
3495 }
3496 }
3497 if (!list_empty(&priv->alg_list))
3498 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3499 (char *)of_get_property(np, "compatible", NULL));
3500
3501 return 0;
3502
3503err_out:
3504 talitos_remove(ofdev);
3505
3506 return err;
3507}
3508
3509static const struct of_device_id talitos_match[] = {
3510#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3511 {
3512 .compatible = "fsl,sec1.0",
3513 },
3514#endif
3515#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3516 {
3517 .compatible = "fsl,sec2.0",
3518 },
3519#endif
3520 {},
3521};
3522MODULE_DEVICE_TABLE(of, talitos_match);
3523
3524static struct platform_driver talitos_driver = {
3525 .driver = {
3526 .name = "talitos",
3527 .of_match_table = talitos_match,
3528 },
3529 .probe = talitos_probe,
3530 .remove = talitos_remove,
3531};
3532
3533module_platform_driver(talitos_driver);
3534
3535MODULE_LICENSE("GPL");
3536MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3537MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");