// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}

static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

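	/* Each classification RAM bank spans 64KB, so address bits 16 and up select the bank */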
	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}

static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask)
{
	u32 val, addrhi, addrlo, addrmid;
	int actbank;

	/*
	 * Probe the actual size of the physically attached cache data RAM
	 * using a binary subdivision algorithm down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > 32) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel((addrmid | (addrlo << 16)) & probemask,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write marker to lowest address in bottom half */
		eip197_trc_cache_banksel(priv, addrlo, &actbank);
		writel((addrlo | (addrhi << 16)) & probemask,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrlo & 0xffff));

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if (val == ((addrmid | (addrlo << 16)) & probemask)) {
			/* read back correct, continue with top half */
			addrlo = addrmid;
		} else {
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
		}
	}
	return addrhi;
}

static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

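		/* Link each record to its neighbours to build the free chain; the ends are terminated with EIP197_RC_NULL */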
		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}

static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);

	/*
	 * Now probe the administration RAM size pretty much the same way,
	 * except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
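	/* (the probe returns a byte count; an admin word is 16 bytes, hence the shift) */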
	asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Determine the optimal configuration from the RAM sizes.
	 * Note that we assume that the physical RAM configuration is sane;
	 * therefore, we don't do any parameter error checking here.
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));

	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
}

static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFOs */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}
}

static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const u32 *data = (const u32 *)fw->data;
	int i;

	/* Write the firmware */
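	/* (images are stored big-endian, hence the be32_to_cpu() per word) */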
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}

/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
				      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}

static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fall back to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}

static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

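	/* Round the CD size up to a whole number of HW datawords (each 1 << hwdataw 32-bit words) */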
	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CDs we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RDs we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* Reset HIA input interface arbiter (EIP197 only) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

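		/* RING_SIZE is in bytes; the offset is in 32-bit words, hence the << 2 */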
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		eip197_trc_cache_init(priv);
		priv->flags |= EIP197_TRC_CACHE;

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of resources,
	 * proceed with it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely((!rdesc->descriptor_overflow) &&
		   (!rdesc->buffer_overflow) &&
		   (!rdesc->result_data.error_code)))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (rdesc->result_data.error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		return -EINVAL;
	} else if (rdesc->result_data.error_code & BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
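	/* CONTEXT_CONTROL_INV_TR: ask the engine to invalidate the transform record for this context */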
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

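	/* Read how many packets the RDR reports as processed since the last acknowledge */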
	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process more
	 * requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

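	/* Ring slots were just freed, so kick the dequeue worker to push more requests */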
	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     int ring_id,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq, cpu;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	/* Set affinity */
	cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
	irq_set_affinity_hint(irq, get_cpu_mask(cpu));

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
	&safexcel_alg_ctr_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
	&safexcel_alg_xts_aes,
	&safexcel_alg_gcm,
	&safexcel_alg_ccm,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't register this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[j]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		/* Do we have all required base algorithms available? */
		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
		    safexcel_algs[i]->algo_mask)
			/* No, so don't unregister this ciphersuite */
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}

static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	if (priv->flags & SAFEXCEL_HW_EIP197)
		/* Wider field width for all EIP197 type engines */
		mask = EIP197_N_PES_MASK;
	else
		/* Narrow field width for EIP97 type engine */
		mask = EIP97_N_PES_MASK;

	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

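	/* HIA_OPTIONS bits 27:25 give the host data width (log2 in words); align descriptor offsets to it */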
	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;

	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
		offsets->global = EIP197_GLOBAL_BASE;
	} else {
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
		offsets->global = EIP97_GLOBAL_BASE;
	}
}

/*
 * Generic part of probe routine, shared by platform and PCI driver
 *
 * Assumes IO resources have been mapped, private data mem has been allocated,
 * clocks have been enabled, device pointer has been assigned etc.
 */
static int safexcel_probe_generic(void *pdev,
				  struct safexcel_crypto_priv *priv,
				  int is_pci_dev)
{
	struct device *dev = priv->dev;
	u32 peid, version, mask, val, hiaopt;
	int i, ret, hwctg;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool)
		return -ENOMEM;

	/*
	 * First try the EIP97 HIA version regs; for the EIP197, this is
	 * guaranteed to NOT return any of the test values.
	 */
	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);

	mask = 0; /* do not swap */
	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
		/* read back byte-swapped, so complement byte swap bits */
		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
	} else {
		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
		version = readl(priv->base + EIP197_HIA_AIC_BASE +
				EIP197_HIA_VERSION);
		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else if (EIP197_REG_HI16(version) ==
			   EIP197_HIA_VERSION_BE) {
			/* read back byte-swapped, so complement swap bits */
			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
			priv->flags |= SAFEXCEL_HW_EIP197;
		} else {
			return -ENODEV;
		}
	}

	/* Now initialize the reg offsets based on the probing info so far */
	safexcel_init_register_offsets(priv);

	/*
	 * If the version was read byte-swapped, we need to flip the device
	 * swapping. Keep in mind here, though, that what we write will also be
	 * byte-swapped ...
	 */
	if (mask) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val = val ^ (mask >> 24); /* toggle byte swap bits */
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/*
	 * We're not done probing yet! We may fall through to here if no HIA
	 * was found at all. So, with the endianness presumably correct now and
	 * the offsets setup, *really* probe for the EIP97/EIP197.
	 */
	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
		/*
		 * We did not find the device that matched our initial probing
		 * (or our initial probing failed); report an appropriate error.
		 */
		return -ENODEV;
	}

	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
	hwctg = version >> 28;
	peid = version & 255;

	/* Detect EIP96 packet engine and version */
	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
		return -ENODEV;
	}
	priv->hwconfig.pever = EIP197_VERSION_MASK(version);

	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);

	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP197_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
					   EIP197_CFSIZE_MASK) +
					  EIP197_CFSIZE_ADJUST;
		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
					   EIP197_RFSIZE_MASK) +
					  EIP197_RFSIZE_ADJUST;
	} else {
		/* EIP97 */
		priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
					 EIP97_HWDATAW_MASK;
		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
					  EIP97_CFSIZE_MASK;
		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
					  EIP97_RFSIZE_MASK;
	}

	/* Get supported algorithms from EIP96 transform engine */
	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
					  EIP197_PE_EIP96_OPTIONS(0));

	/* Print single info line describing what we just detected */
	dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
		 priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
		 priv->hwconfig.hwrfsize, priv->hwconfig.pever,
		 priv->hwconfig.algo_flags);

	safexcel_configure(priv);

	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
		/*
		 * Request MSI vectors for global + 1 per ring -
		 * or just 1 for older dev images
		 */
		struct pci_dev *pci_pdev = pdev;

		ret = pci_alloc_irq_vectors(pci_pdev,
					    priv->config.rings + 1,
					    priv->config.rings + 1,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
		if (ret < 0) {
			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
			return ret;
		}
	}

	/* Register the ring IRQ handlers and configure the rings */
	priv->ring = devm_kcalloc(dev, priv->config.rings,
				  sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	for (i = 0; i < priv->config.rings; i++) {
		char wq_name[9] = {0};
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret) {
			dev_err(dev, "Failed to initialize rings\n");
			goto err_cleanup_rings;
		}

		priv->ring[i].rdr_req = devm_kcalloc(dev,
						     EIP197_DEFAULT_RING_SIZE,
						     sizeof(*priv->ring[i].rdr_req),
						     GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		irq = safexcel_request_ring_irq(pdev,
						EIP197_IRQ_NUMBER(i, is_pci_dev),
						is_pci_dev,
						i,
						safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
			ret = irq;
			goto err_cleanup_rings;
		}

		priv->ring[i].irq = irq;
		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work,
			  safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue =
			create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_cleanup_rings;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}

	atomic_set(&priv->ring_used, 0);

	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "HW init failed (%d)\n", ret);
		goto err_cleanup_rings;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_cleanup_rings;
	}

	return 0;

err_cleanup_rings:
	for (i = 0; i < priv->config.rings; i++) {
		if (priv->ring[i].irq)
			irq_set_affinity_hint(priv->ring[i].irq, NULL);
		if (priv->ring[i].workqueue)
			destroy_workqueue(priv->ring[i].workqueue);
	}

	return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}

#if IS_ENABLED(CONFIG_OF)
/* for Device Tree platform driver */

static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	platform_set_drvdata(pdev, priv);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}

	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	/* Generic EIP97/EIP197 device probing */
	ret = safexcel_probe_generic(pdev, priv, 0);
	if (ret)
		goto err_reg_clk;

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++) {
		irq_set_affinity_hint(priv->ring[i].irq, NULL);
		destroy_workqueue(priv->ring[i].workqueue);
	}

	return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D_MRVL,
	},
	/* For backward compatibility and intended for generic use */
	{
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES_MRVL,
	},
	{
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B_MRVL,
	},
	{},
};

MODULE_DEVICE_TABLE(of, safexcel_of_match_table);

static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
#endif

#if IS_ENABLED(CONFIG_PCI)
/* PCIE devices - i.e. Inside Secure development boards */

static int safexcel_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct safexcel_crypto_priv *priv;
	void __iomem *pciebase;
	int rc;
	u32 val;

	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
		ent->vendor, ent->device, ent->subvendor,
		ent->subdevice, ent->driver_data);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)ent->driver_data;

	pci_set_drvdata(pdev, priv);

	/* enable the device */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(dev, "Failed to enable PCI device\n");
		return rc;
	}

	/* take ownership of PCI BAR0 */
	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
	if (rc) {
		dev_err(dev, "Failed to map IO region for BAR0\n");
		return rc;
	}
	priv->base = pcim_iomap_table(pdev)[0];

	if (priv->version == EIP197_DEVBRD) {
		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");

		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
		if (rc) {
			dev_err(dev, "Failed to map IO region for BAR4\n");
			return rc;
		}

		pciebase = pcim_iomap_table(pdev)[2];
		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
				(val & 0xff));

			/* Setup MSI identity map mapping */
			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);

			/* Enable all device interrupts */
			writel(GENMASK(31, 0),
			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
		} else {
			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
				val);
			return -ENODEV;
		}

		/* HW reset FPGA dev board */
		/* assert reset */
		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
		/* deassert reset */
		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
		wmb(); /* maintain strict ordering for accesses here */
	}

	/* enable bus mastering */
	pci_set_master(pdev);

	/* Generic EIP97/EIP197 device probing */
	rc = safexcel_probe_generic(pdev, priv, 1);
	return rc;
}

static void safexcel_pci_remove(struct pci_dev *pdev)
{
	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	safexcel_hw_reset_rings(priv);
}

static const struct pci_device_id safexcel_pci_ids[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
			       0x16ae, 0xc522),
		.driver_data = EIP197_DEVBRD,
	},
	{},
};

MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);

static struct pci_driver safexcel_pci_driver = {
	.name = "crypto-safexcel",
	.id_table = safexcel_pci_ids,
	.probe = safexcel_pci_probe,
	.remove = safexcel_pci_remove,
};
#endif

/* Unfortunately, we have to resort to global variables here */
#if IS_ENABLED(CONFIG_PCI)
static int pcireg_rc = -EINVAL; /* Default safe value */
#endif
#if IS_ENABLED(CONFIG_OF)
static int ofreg_rc = -EINVAL; /* Default safe value */
#endif

static int __init safexcel_init(void)
{
#if IS_ENABLED(CONFIG_PCI)
	/* Register PCI driver */
	pcireg_rc = pci_register_driver(&safexcel_pci_driver);
#endif

#if IS_ENABLED(CONFIG_OF)
	/* Register platform driver */
	ofreg_rc = platform_driver_register(&crypto_safexcel);
 #if IS_ENABLED(CONFIG_PCI)
	/* Return success if either PCI or OF registered OK */
	return pcireg_rc ? ofreg_rc : 0;
 #else
	return ofreg_rc;
 #endif
#else
 #if IS_ENABLED(CONFIG_PCI)
	return pcireg_rc;
 #else
	return -EINVAL;
 #endif
#endif
}

static void __exit safexcel_exit(void)
{
#if IS_ENABLED(CONFIG_OF)
	/* Unregister platform driver */
	if (!ofreg_rc)
		platform_driver_unregister(&crypto_safexcel);
#endif

#if IS_ENABLED(CONFIG_PCI)
	/* Unregister PCI driver if successfully registered before */
	if (!pcireg_rc)
		pci_unregister_driver(&safexcel_pci_driver);
#endif
}

module_init(safexcel_init);
module_exit(safexcel_exit);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");