ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/crypto/inside-secure/Makefile b/marvell/linux/drivers/crypto/inside-secure/Makefile
new file mode 100644
index 0000000..13f64f9
--- /dev/null
+++ b/marvell/linux/drivers/crypto/inside-secure/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
+crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
diff --git a/marvell/linux/drivers/crypto/inside-secure/safexcel.c b/marvell/linux/drivers/crypto/inside-secure/safexcel.c
new file mode 100644
index 0000000..04638e1
--- /dev/null
+++ b/marvell/linux/drivers/crypto/inside-secure/safexcel.c
@@ -0,0 +1,1877 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+static u32 max_rings = EIP197_MAX_RINGS;
+module_param(max_rings, uint, 0644);
+MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
+
+static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	/*
+	 * Map all interfaces/rings to register index 0
+	 * so they can share contexts. Without this, the EIP197 will
+	 * assume each interface/ring is in its own memory domain,
+	 * i.e. has its own subset of UNIQUE memory addresses, which
+	 * would cause records with the SAME memory address to use
+	 * DIFFERENT cache buffers, causing both poor cache utilization
+	 * AND serious coherence/invalidation issues.
+	 */
+	for (i = 0; i < 4; i++)
+		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
+
+	/*
+	 * Initialize other virtualization regs for cache.
+	 * These may not be in their reset state ...
+	 */
+	for (i = 0; i < priv->config.rings; i++) {
+		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
+		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
+		writel(EIP197_FLUE_CONFIG_MAGIC,
+		       priv->base + EIP197_FLUE_CONFIG(i));
+	}
+	writel(0, priv->base + EIP197_FLUE_OFFSETS);
+	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
+}
+
+static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
+				     u32 addrmid, int *actbank)
+{
+	u32 val;
+	int curbank;
+
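+	/* Each bank select window covers 64 KB of the classification RAM */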
+	curbank = addrmid >> 16;
+	if (curbank != *actbank) {
+		val = readl(priv->base + EIP197_CS_RAM_CTRL);
+		val = (val & ~EIP197_CS_BANKSEL_MASK) |
+		      (curbank << EIP197_CS_BANKSEL_OFS);
+		writel(val, priv->base + EIP197_CS_RAM_CTRL);
+		*actbank = curbank;
+	}
+}
+
+static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
+				  int maxbanks, u32 probemask)
+{
+	u32 val, addrhi, addrlo, addrmid;
+	int actbank;
+
+	/*
+	 * Probe the actual size of the physically attached cache data RAM,
+	 * using a binary subdivision algorithm down to 32-byte cache lines.
+	 */
+	addrhi = 1 << (16 + maxbanks);
+	addrlo = 0;
+	actbank = min(maxbanks - 1, 0);
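+	/*
+	 * Each step writes distinct markers to the midpoint and to the
+	 * lowest address; if the midpoint marker reads back intact, the
+	 * RAM extends at least that far, otherwise the write aliased and
+	 * the RAM ends below the midpoint.
+	 */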
+	while ((addrhi - addrlo) > 32) {
+		/* write marker to lowest address in top half */
+		addrmid = (addrhi + addrlo) >> 1;
+		eip197_trc_cache_banksel(priv, addrmid, &actbank);
+		writel((addrmid | (addrlo << 16)) & probemask,
+			priv->base + EIP197_CLASSIFICATION_RAMS +
+			(addrmid & 0xffff));
+
+		/* write marker to lowest address in bottom half */
+		eip197_trc_cache_banksel(priv, addrlo, &actbank);
+		writel((addrlo | (addrhi << 16)) & probemask,
+			priv->base + EIP197_CLASSIFICATION_RAMS +
+			(addrlo & 0xffff));
+
+		/* read back marker from top half */
+		eip197_trc_cache_banksel(priv, addrmid, &actbank);
+		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
+			    (addrmid & 0xffff));
+
+		if (val == ((addrmid | (addrlo << 16)) & probemask)) {
+			/* read back correct, continue with top half */
+			addrlo = addrmid;
+		} else {
+			/* not read back correct, continue with bottom half */
+			addrhi = addrmid;
+		}
+	}
+	return addrhi;
+}
+
+static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
+				   int cs_rc_max, int cs_ht_wc)
+{
+	int i;
+	u32 htable_offset, val, offset;
+
+	/* Clear all records in administration RAM */
+	for (i = 0; i < cs_rc_max; i++) {
+		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
+
+		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
+		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
+		       priv->base + offset);
+
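+		/*
+		 * Link record i to its neighbours to build the free chain;
+		 * the first record's PREV and the last record's NEXT are
+		 * forced to EIP197_RC_NULL below.
+		 */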
+		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
+		if (i == 0)
+			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
+		else if (i == cs_rc_max - 1)
+			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
+		writel(val, priv->base + offset + 4);
+		/* must also initialize the address key due to ECC! */
+		writel(0, priv->base + offset + 8);
+		writel(0, priv->base + offset + 12);
+	}
+
+	/* Clear the hash table entries */
+	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
+	for (i = 0; i < cs_ht_wc; i++)
+		writel(GENMASK(29, 0),
+		       priv->base + EIP197_CLASSIFICATION_RAMS +
+		       htable_offset + i * sizeof(u32));
+}
+
+static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+{
+	u32 val, dsize, asize;
+	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+	int cs_rc_abs_max, cs_ht_sz;
+	int maxbanks;
+
+	/* Setup (dummy) virtualization for cache */
+	eip197_trc_cache_setupvirt(priv);
+
+	/*
+	 * Enable the record cache memory access and
+	 * probe the bank select width
+	 */
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	val &= ~EIP197_TRC_ENABLE_MASK;
+	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
+	writel(val, priv->base + EIP197_CS_RAM_CTRL);
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >>
+		    EIP197_CS_BANKSEL_OFS) + 1;
+
+	/* Clear all ECC errors */
+	writel(0, priv->base + EIP197_TRC_ECCCTRL);
+
+	/*
+	 * Make sure the cache memory is accessible by taking the record
+	 * cache into reset. Need data memory access here, not admin access.
+	 */
+	val = readl(priv->base + EIP197_TRC_PARAMS);
+	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+
+	/* Probed data RAM size in bytes */
+	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);
+
+	/*
+	 * Now probe the administration RAM size in pretty much the same way,
+	 * except that only the lower 30 bits are writable and we don't need
+	 * bank selects.
+	 */
+	val = readl(priv->base + EIP197_TRC_PARAMS);
+	/* admin access now */
+	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+
+	/* Probed admin RAM size in admin words */
+	asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;
+
+	/* Clear any ECC errors detected while probing! */
+	writel(0, priv->base + EIP197_TRC_ECCCTRL);
+
+	/*
+	 * Determine the optimal configuration from the RAM sizes.
+	 * Note that we assume the physical RAM configuration is sane;
+	 * therefore, we don't do any parameter error checking here ...
+	 */
+
+	/* For now, just use a single record format covering everything */
+	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
+	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;
+
+	/*
+	 * Step #1: How many records will physically fit?
+	 * Hard upper limit is 1023!
+	 */
+	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
+	/* Step #2: Need at least 2 words in the admin RAM per record */
+	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
+	/* Step #3: Determine log2 of hash table size */
+	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
+	/* Step #4: determine current size of hash table in dwords */
+	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
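+	/*
+	 * Note: asize is in 16-byte admin words (4 dwords each), hence the
+	 * conversion of the hash table size back to admin words below.
+	 */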
+	/* Step #5: add back excess words and see if we can fit more records */
+	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
+
+	/* Clear the cache RAMs */
+	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
+
+	/* Disable the record cache memory access */
+	val = readl(priv->base + EIP197_CS_RAM_CTRL);
+	val &= ~EIP197_TRC_ENABLE_MASK;
+	writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+	/* Write head and tail pointers of the record free chain */
+	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
+	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
+	writel(val, priv->base + EIP197_TRC_FREECHAIN);
+
+	/* Configure the record cache #1 */
+	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
+	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
+	writel(val, priv->base + EIP197_TRC_PARAMS2);
+
+	/* Configure the record cache #2 */
+	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
+	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
+	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
+	writel(val, priv->base + EIP197_TRC_PARAMS);
+
+	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
+		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
+}
+
+static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
+{
+	int pe, i;
+	u32 val;
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Configure the token FIFOs */
+		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
+		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));
+
+		/* Clear the ICE scratchpad memory */
+		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
+
+		/* Clear the scratchpad RAM using 32-bit writes only */
+		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
+			writel(0, EIP197_PE(priv) +
+				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));
+
+		/* Reset the IFPP engine to make its program mem accessible */
+		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
+		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
+		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
+		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
+
+		/* Reset the IPUE engine to make its program mem accessible */
+		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
+		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
+		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
+		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
+
+		/* Enable access to all IFPP program memories */
+		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
+		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+	}
+}
+
+static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
+				  const struct firmware *fw)
+{
+	const u32 *data = (const u32 *)fw->data;
+	int i;
+
+	/* Write the firmware */
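+	/*
+	 * The image is stored as big-endian 32-bit words,
+	 * hence the be32_to_cpu() below.
+	 */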
+	for (i = 0; i < fw->size / sizeof(u32); i++)
+		writel(be32_to_cpu(data[i]),
+		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+
+	/* Exclude final 2 NOPs from size */
+	return i - EIP197_FW_TERMINAL_NOPS;
+}
+
+/*
+ * If FW is actual production firmware, then poll for its initialization
+ * to complete and check if it is good for the HW; otherwise just return OK.
+ */
+static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
+{
+	int pe, pollcnt;
+	u32 base, pollofs;
+
+	if (fpp)
+		pollofs  = EIP197_FW_FPP_READY;
+	else
+		pollofs  = EIP197_FW_PUE_READY;
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
+		pollcnt = EIP197_FW_START_POLLCNT;
+		while (pollcnt &&
+		       (readl_relaxed(EIP197_PE(priv) + base +
+			      pollofs) != 1)) {
+			pollcnt--;
+		}
+		if (!pollcnt) {
+			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
+				fpp, pe);
+			return false;
+		}
+	}
+	return true;
+}
+
+static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
+				  int ipuesz, int ifppsz, int minifw)
+{
+	int pe;
+	u32 val;
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Disable access to all program memory */
+		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+		/* Start IFPP microengines */
+		if (minifw)
+			val = 0;
+		else
+			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
+					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
+				EIP197_PE_ICE_UENG_DEBUG_RESET;
+		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));
+
+		/* Start IPUE microengines */
+		if (minifw)
+			val = 0;
+		else
+			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
+					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
+				EIP197_PE_ICE_UENG_DEBUG_RESET;
+		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
+	}
+
+	/* For miniFW startup, there is no initialization, so always succeed */
+	if (minifw)
+		return true;
+
+	/* Wait until all the firmwares have properly started up */
+	if (!poll_fw_ready(priv, 1))
+		return false;
+	if (!poll_fw_ready(priv, 0))
+		return false;
+
+	return true;
+}
+
+static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
+{
+	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
+	const struct firmware *fw[FW_NB];
+	char fw_path[37], *dir = NULL;
+	int i, j, ret = 0, pe;
+	int ipuesz, ifppsz, minifw = 0;
+
+	if (priv->version == EIP197D_MRVL)
+		dir = "eip197d";
+	else if (priv->version == EIP197B_MRVL ||
+		 priv->version == EIP197_DEVBRD)
+		dir = "eip197b";
+	else
+		return -ENODEV;
+
+retry_fw:
+	for (i = 0; i < FW_NB; i++) {
+		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
+		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
+		if (ret) {
+			if (minifw || priv->version != EIP197B_MRVL)
+				goto release_fw;
+
+			/* Fall back to the old firmware location for the
+			 * EIP197b.
+			 */
+			ret = firmware_request_nowarn(&fw[i], fw_name[i],
+						      priv->dev);
+			if (ret)
+				goto release_fw;
+		}
+	}
+
+	eip197_init_firmware(priv);
+
+	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
+
+	/* Enable access to IPUE program memories */
+	for (pe = 0; pe < priv->config.pes; pe++)
+		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
+		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
+
+	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
+		dev_dbg(priv->dev, "Firmware loaded successfully\n");
+		return 0;
+	}
+
+	ret = -ENODEV;
+
+release_fw:
+	for (j = 0; j < i; j++)
+		release_firmware(fw[j]);
+
+	if (!minifw) {
+		/* Retry with minifw path */
+		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
+		dir = "eip197_minifw";
+		minifw = 1;
+		goto retry_fw;
+	}
+
+	dev_dbg(priv->dev, "Firmware load failed.\n");
+
+	return ret;
+}
+
+static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
+{
+	u32 cd_size_rnd, val;
+	int i, cd_fetch_cnt;
+
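+	/*
+	 * Compute the command descriptor size rounded up to units of the
+	 * HIA data width (2^hwdataw 32-bit words).
+	 */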
+	cd_size_rnd  = (priv->config.cd_size +
+			(BIT(priv->hwconfig.hwdataw) - 1)) >>
+		       priv->hwconfig.hwdataw;
+	/* determine number of CDs we can fetch into the CD FIFO as 1 block */
+	if (priv->flags & SAFEXCEL_HW_EIP197) {
+		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
+		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
+		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
+				     (priv->config.pes * EIP197_FETCH_DEPTH));
+	} else {
+		/* for the EIP97, just fetch all that fits minus 1 */
+		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
+				cd_size_rnd) - 1;
+	}
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* ring base address */
+		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+		       priv->config.cd_size,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+		writel(((cd_fetch_cnt *
+			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
+		       (cd_fetch_cnt * priv->config.cd_offset),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Configure DMA tx control */
+		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
+
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0),
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+	}
+
+	return 0;
+}
+
+static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
+{
+	u32 rd_size_rnd, val;
+	int i, rd_fetch_cnt;
+
+	/* determine number of RDs we can fetch into the FIFO as one block */
+	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
+		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
+		      priv->hwconfig.hwdataw;
+	if (priv->flags & SAFEXCEL_HW_EIP197) {
+		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
+		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
+		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
+				     (priv->config.pes * EIP197_FETCH_DEPTH));
+	} else {
+		/* for the EIP97, just fetch all that fits minus 1 */
+		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
+				rd_size_rnd) - 1;
+	}
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* ring base address */
+		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+		       priv->config.rd_size,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+
+		writel(((rd_fetch_cnt *
+			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
+		       (rd_fetch_cnt * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Configure DMA tx control */
+		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
+		writel(val,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
+
+		/* clear any pending interrupt */
+		writel(GENMASK(7, 0),
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* enable ring interrupt */
+		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+		val |= EIP197_RDR_IRQ(i);
+		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+	}
+
+	return 0;
+}
+
+static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
+{
+	u32 val;
+	int i, ret, pe;
+
+	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
+		priv->config.pes, priv->config.rings);
+
+	/*
+	 * For EIP197s only, set the maximum number of TX commands to 2^5 = 32.
+	 * Skip this for the EIP97 as it does not have this field.
+	 */
+	if (priv->flags & SAFEXCEL_HW_EIP197) {
+		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+	}
+
+	/* Configure wr/rd cache values */
+	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
+	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
+	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
+
+	/* Interrupts reset */
+
+	/* Disable all global interrupts */
+	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
+
+	/* Clear any pending interrupt */
+	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+	/* Processing Engine configuration */
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Data Fetch Engine configuration */
+
+		/* Reset all DFE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		if (priv->flags & SAFEXCEL_HW_EIP197)
+			/* Reset HIA input interface arbiter (EIP197 only) */
+			writel(EIP197_HIA_RA_PE_CTRL_RESET,
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
+		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
+
+		/* Take the DFE threads out of the reset state */
+		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(9),
+		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
+		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
+		       EIP197_PE_IN_xBUF_THRES_MAX(7),
+		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
+
+		if (priv->flags & SAFEXCEL_HW_EIP197)
+			/* enable HIA input interface arbiter and rings */
+			writel(EIP197_HIA_RA_PE_CTRL_EN |
+			       GENMASK(priv->config.rings - 1, 0),
+			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+
+		/* Data Store Engine configuration */
+
+		/* Reset all DSE threads */
+		writel(EIP197_DxE_THR_CTRL_RESET_PE,
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Wait for all DSE threads to complete */
+		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
+			GENMASK(15, 12)) != GENMASK(15, 12))
+			;
+
+		/* DMA transfer size to use */
+		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+		/* FIXME: instability issues can occur for EIP97 but disabling
+		 * it impacts performance.
+		 */
+		if (priv->flags & SAFEXCEL_HW_EIP197)
+			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
+
+		/* Take the DSE threads out of the reset state */
+		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+		/* Configure the processing engine thresholds */
+		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
+		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+		/* Processing Engine configuration */
+
+		/* Token & context configuration */
+		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
+		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
+		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
+		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
+
+		/* H/W capabilities selection: just enable everything */
+		writel(EIP197_FUNCTION_ALL,
+		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
+		writel(EIP197_FUNCTION_ALL,
+		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
+	}
+
+	/* Command Descriptor Rings prepare */
+	for (i = 0; i < priv->config.rings; i++) {
+		/* Clear interrupts for this ring */
+		writel(GENMASK(31, 0),
+		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+
+		/* Disable external triggering */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Clear the pending prepared counter */
+		writel(EIP197_xDR_PREP_CLR_COUNT,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
+
+		/* Clear the pending processed counter */
+		writel(EIP197_xDR_PROC_CLR_COUNT,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
+
+		writel(0,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
+		writel(0,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+	}
+
+	/* Result Descriptor Rings prepare */
+	for (i = 0; i < priv->config.rings; i++) {
+		/* Disable external triggering */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+		/* Clear the pending prepared counter */
+		writel(EIP197_xDR_PREP_CLR_COUNT,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
+
+		/* Clear the pending processed counter */
+		writel(EIP197_xDR_PROC_CLR_COUNT,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
+
+		writel(0,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
+		writel(0,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+		/* Ring size */
+		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+	}
+
+	for (pe = 0; pe < priv->config.pes; pe++) {
+		/* Enable command descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+		/* Enable result descriptor rings */
+		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+	}
+
+	/* Clear any HIA interrupt */
+	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+	if (priv->flags & SAFEXCEL_HW_EIP197) {
+		eip197_trc_cache_init(priv);
+		priv->flags |= EIP197_TRC_CACHE;
+
+		ret = eip197_load_firmwares(priv);
+		if (ret)
+			return ret;
+	}
+
+	safexcel_hw_setup_cdesc_rings(priv);
+	safexcel_hw_setup_rdesc_rings(priv);
+
+	return 0;
+}
+
+/* Called with ring's lock taken */
+static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+				       int ring)
+{
+	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
+
+	if (!coal)
+		return;
+
+	/* Configure when we want an interrupt */
+	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+}
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+{
+	struct crypto_async_request *req, *backlog;
+	struct safexcel_context *ctx;
+	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
+
+	/* If a request wasn't properly dequeued because of a lack of resources,
+	 * process it first.
+	 */
+	req = priv->ring[ring].req;
+	backlog = priv->ring[ring].backlog;
+	if (req)
+		goto handle_req;
+
+	while (true) {
+		spin_lock_bh(&priv->ring[ring].queue_lock);
+		backlog = crypto_get_backlog(&priv->ring[ring].queue);
+		req = crypto_dequeue_request(&priv->ring[ring].queue);
+		spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+		if (!req) {
+			priv->ring[ring].req = NULL;
+			priv->ring[ring].backlog = NULL;
+			goto finalize;
+		}
+
+handle_req:
+		ctx = crypto_tfm_ctx(req->tfm);
+		ret = ctx->send(req, ring, &commands, &results);
+		if (ret)
+			goto request_failed;
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		/* In case the send() helper did not issue any command to push
+		 * to the engine because the input data was cached, continue to
+		 * dequeue other requests as this is valid and not an error.
+		 */
+		if (!commands && !results)
+			continue;
+
+		cdesc += commands;
+		rdesc += results;
+		nreq++;
+	}
+
+request_failed:
+	/* Not enough resources to handle all the requests. Bail out and save
+	 * the request and the backlog for the next dequeue call (per-ring).
+	 */
+	priv->ring[ring].req = req;
+	priv->ring[ring].backlog = backlog;
+
+finalize:
+	if (!nreq)
+		return;
+
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	priv->ring[ring].requests += nreq;
+
+	if (!priv->ring[ring].busy) {
+		safexcel_try_push_requests(priv, ring);
+		priv->ring[ring].busy = true;
+	}
+
+	spin_unlock_bh(&priv->ring[ring].lock);
+
+	/* let the RDR know we have pending descriptors */
+	writel((rdesc * priv->config.rd_offset) << 2,
+	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+
+	/* let the CDR know we have pending descriptors */
+	writel((cdesc * priv->config.cd_offset) << 2,
+	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+}
+
+inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+				       struct safexcel_result_desc *rdesc)
+{
+	if (likely((!rdesc->descriptor_overflow) &&
+		   (!rdesc->buffer_overflow) &&
+		   (!rdesc->result_data.error_code)))
+		return 0;
+
+	if (rdesc->descriptor_overflow)
+		dev_err(priv->dev, "Descriptor overflow detected");
+
+	if (rdesc->buffer_overflow)
+		dev_err(priv->dev, "Buffer overflow detected");
+
+	if (rdesc->result_data.error_code & 0x4066) {
+		/* Fatal error (bits 1,2,5,6 & 14) */
+		dev_err(priv->dev,
+			"result descriptor error (%x)",
+			rdesc->result_data.error_code);
+		return -EIO;
+	} else if (rdesc->result_data.error_code &
+		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
+		/*
+		 * These take priority over authentication failures:
+		 * blocksize, length & overflow errors mean there is
+		 * something wrong with the input!
+		 */
+		return -EINVAL;
+	} else if (rdesc->result_data.error_code & BIT(9)) {
+		/* Authentication failed */
+		return -EBADMSG;
+	}
+
+	/* All other non-fatal errors */
+	return -EINVAL;
+}
+
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+				 int ring,
+				 struct safexcel_result_desc *rdesc,
+				 struct crypto_async_request *req)
+{
+	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+	priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+	int i = safexcel_ring_first_rdr_index(priv, ring);
+
+	return priv->ring[ring].rdr_req[i];
+}
+
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
+{
+	struct safexcel_command_desc *cdesc;
+
+	/* Acknowledge the command descriptors */
+	do {
+		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
+		if (IS_ERR(cdesc)) {
+			dev_err(priv->dev,
+				"Could not retrieve the command descriptor\n");
+			return;
+		}
+	} while (!cdesc->last_seg);
+}
+
+void safexcel_inv_complete(struct crypto_async_request *req, int error)
+{
+	struct safexcel_inv_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+			      struct safexcel_crypto_priv *priv,
+			      dma_addr_t ctxr_dma, int ring)
+{
+	struct safexcel_command_desc *cdesc;
+	struct safexcel_result_desc *rdesc;
+	int ret = 0;
+
+	/* Prepare command descriptor */
+	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+	if (IS_ERR(cdesc))
+		return PTR_ERR(cdesc);
+
+	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
+	cdesc->control_data.options = 0;
+	cdesc->control_data.refresh = 0;
+	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
+
+	/* Prepare result descriptor */
+	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+
+	if (IS_ERR(rdesc)) {
+		ret = PTR_ERR(rdesc);
+		goto cdesc_rollback;
+	}
+
+	safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+	return ret;
+
+cdesc_rollback:
+	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+	return ret;
+}
+
+static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
+						     int ring)
+{
+	struct crypto_async_request *req;
+	struct safexcel_context *ctx;
+	int ret, i, nreq, ndesc, tot_descs, handled = 0;
+	bool should_complete;
+
+handle_results:
+	tot_descs = 0;
+
+	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
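+	/* Number of packets the RDR reports processed since the last ack */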
+	if (!nreq)
+		goto requests_left;
+
+	for (i = 0; i < nreq; i++) {
+		req = safexcel_rdr_req_get(priv, ring);
+
+		ctx = crypto_tfm_ctx(req->tfm);
+		ndesc = ctx->handle_result(priv, ring, req,
+					   &should_complete, &ret);
+		if (ndesc < 0) {
+			dev_err(priv->dev, "failed to handle result (%d)\n",
+				ndesc);
+			goto acknowledge;
+		}
+
+		if (should_complete) {
+			local_bh_disable();
+			req->complete(req, ret);
+			local_bh_enable();
+		}
+
+		tot_descs += ndesc;
+		handled++;
+	}
+
+acknowledge:
+	if (i)
+		writel(EIP197_xDR_PROC_xD_PKT(i) |
+		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+
+	/* If the number of requests overflowed the counter, try to process more
+	 * requests.
+	 */
+	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+		goto handle_results;
+
+requests_left:
+	spin_lock_bh(&priv->ring[ring].lock);
+
+	priv->ring[ring].requests -= handled;
+	safexcel_try_push_requests(priv, ring);
+
+	if (!priv->ring[ring].requests)
+		priv->ring[ring].busy = false;
+
+	spin_unlock_bh(&priv->ring[ring].lock);
+}
+
+static void safexcel_dequeue_work(struct work_struct *work)
+{
+	struct safexcel_work_data *data =
+			container_of(work, struct safexcel_work_data, work);
+
+	safexcel_dequeue(data->priv, data->ring);
+}
+
+struct safexcel_ring_irq_data {
+	struct safexcel_crypto_priv *priv;
+	int ring;
+};
+
+static irqreturn_t safexcel_irq_ring(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring, rc = IRQ_NONE;
+	u32 status, stat;
+
+	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+	if (!status)
+		return rc;
+
+	/* RDR interrupts */
+	if (status & EIP197_RDR_IRQ(ring)) {
+		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
+
+		if (unlikely(stat & EIP197_xDR_ERR)) {
+			/*
+			 * Fatal error, the RDR is unusable and must be
+			 * reinitialized. This should not happen under
+			 * normal circumstances.
+			 */
+			dev_err(priv->dev, "RDR: fatal error.\n");
+		} else if (likely(stat & EIP197_xDR_THRESH)) {
+			rc = IRQ_WAKE_THREAD;
+		}
+
+		/* ACK the interrupts */
+		writel(stat & 0xff,
+		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
+	}
+
+	/* ACK the interrupts */
+	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+	return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+	struct safexcel_ring_irq_data *irq_data = data;
+	struct safexcel_crypto_priv *priv = irq_data->priv;
+	int ring = irq_data->ring;
+
+	safexcel_handle_result_descriptor(priv, ring);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return IRQ_HANDLED;
+}
+
+static int safexcel_request_ring_irq(void *pdev, int irqid,
+				     int is_pci_dev,
+				     int ring_id,
+				     irq_handler_t handler,
+				     irq_handler_t threaded_handler,
+				     struct safexcel_ring_irq_data *ring_irq_priv)
+{
+	int ret, irq, cpu;
+	struct device *dev;
+
+	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
+		struct pci_dev *pci_pdev = pdev;
+
+		dev = &pci_pdev->dev;
+		irq = pci_irq_vector(pci_pdev, irqid);
+		if (irq < 0) {
+			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
+				irqid, irq);
+			return irq;
+		}
+	} else if (IS_ENABLED(CONFIG_OF)) {
+		struct platform_device *plf_pdev = pdev;
+		char irq_name[6] = {0}; /* "ringX\0" */
+
+		snprintf(irq_name, 6, "ring%d", irqid);
+		dev = &plf_pdev->dev;
+		irq = platform_get_irq_byname(plf_pdev, irq_name);
+
+		if (irq < 0) {
+			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
+				irq_name, irq);
+			return irq;
+		}
+	} else {
+		return -ENXIO;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, handler,
+					threaded_handler, IRQF_ONESHOT,
+					dev_name(dev), ring_irq_priv);
+	if (ret) {
+		dev_err(dev, "unable to request IRQ %d\n", irq);
+		return ret;
+	}
+
+	/* Set affinity */
+	cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+	irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
+	return irq;
+}
+
+static struct safexcel_alg_template *safexcel_algs[] = {
+	&safexcel_alg_ecb_des,
+	&safexcel_alg_cbc_des,
+	&safexcel_alg_ecb_des3_ede,
+	&safexcel_alg_cbc_des3_ede,
+	&safexcel_alg_ecb_aes,
+	&safexcel_alg_cbc_aes,
+	&safexcel_alg_cfb_aes,
+	&safexcel_alg_ofb_aes,
+	&safexcel_alg_ctr_aes,
+	&safexcel_alg_md5,
+	&safexcel_alg_sha1,
+	&safexcel_alg_sha224,
+	&safexcel_alg_sha256,
+	&safexcel_alg_sha384,
+	&safexcel_alg_sha512,
+	&safexcel_alg_hmac_md5,
+	&safexcel_alg_hmac_sha1,
+	&safexcel_alg_hmac_sha224,
+	&safexcel_alg_hmac_sha256,
+	&safexcel_alg_hmac_sha384,
+	&safexcel_alg_hmac_sha512,
+	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
+	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
+	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
+	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
+	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
+	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
+	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
+	&safexcel_alg_xts_aes,
+	&safexcel_alg_gcm,
+	&safexcel_alg_ccm,
+};
+
+static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
+{
+	int i, j, ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		safexcel_algs[i]->priv = priv;
+
+		/* Do we have all required base algorithms available? */
+		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
+		    safexcel_algs[i]->algo_mask)
+			/* No, so don't register this ciphersuite */
+			continue;
+
+		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
+		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
+		else
+			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
+
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (j = 0; j < i; j++) {
+		/* Do we have all required base algorithms available? */
+		if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
+		    safexcel_algs[j]->algo_mask)
+			/* No, so don't unregister this ciphersuite */
+			continue;
+
+		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
+		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
+		else
+			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
+	}
+
+	return ret;
+}
+
+static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+		/* Do we have all required base algorithms available? */
+		if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
+		    safexcel_algs[i]->algo_mask)
+			/* No, so don't unregister this ciphersuite */
+			continue;
+
+		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
+		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
+			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
+		else
+			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
+	}
+}
+
+static void safexcel_configure(struct safexcel_crypto_priv *priv)
+{
+	u32 val, mask = 0;
+
+	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+
+	/* Read number of PEs from the engine */
+	if (priv->flags & SAFEXCEL_HW_EIP197)
+		/* Wider field width for all EIP197 type engines */
+		mask = EIP197_N_PES_MASK;
+	else
+		/* Narrow field width for EIP97 type engine */
+		mask = EIP97_N_PES_MASK;
+
+	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
+
+	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
+
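+	/*
+	 * Bits [27:25] of HIA_OPTIONS encode the HW data width; align the
+	 * descriptor offsets to that width, in 32-bit words.
+	 */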
+	val = (val & GENMASK(27, 25)) >> 25;
+	mask = BIT(val) - 1;
+
+	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+
+	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+}
+
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+	struct safexcel_register_offsets *offsets = &priv->offsets;
+
+	if (priv->flags & SAFEXCEL_HW_EIP197) {
+		offsets->hia_aic	= EIP197_HIA_AIC_BASE;
+		offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE;
+		offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe	= EIP197_HIA_DFE_BASE;
+		offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE;
+		offsets->hia_dse	= EIP197_HIA_DSE_BASE;
+		offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE;
+		offsets->pe		= EIP197_PE_BASE;
+		offsets->global		= EIP197_GLOBAL_BASE;
+	} else {
+		offsets->hia_aic	= EIP97_HIA_AIC_BASE;
+		offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE;
+		offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE;
+		offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE;
+		offsets->hia_dfe	= EIP97_HIA_DFE_BASE;
+		offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE;
+		offsets->hia_dse	= EIP97_HIA_DSE_BASE;
+		offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE;
+		offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE;
+		offsets->pe		= EIP97_PE_BASE;
+		offsets->global		= EIP97_GLOBAL_BASE;
+	}
+}
+
+/*
+ * Generic part of probe routine, shared by platform and PCI driver
+ *
+ * Assumes IO resources have been mapped, private data mem has been allocated,
+ * clocks have been enabled, device pointer has been assigned, etc.
+ */
+static int safexcel_probe_generic(void *pdev,
+				  struct safexcel_crypto_priv *priv,
+				  int is_pci_dev)
+{
+	struct device *dev = priv->dev;
+	u32 peid, version, mask, val, hiaopt;
+	int i, ret, hwctg;
+
+	priv->context_pool = dmam_pool_create("safexcel-context", dev,
+					      sizeof(struct safexcel_context_record),
+					      1, 0);
+	if (!priv->context_pool)
+		return -ENOMEM;
+
+	/*
+	 * First try the EIP97 HIA version regs.
+	 * For the EIP197, this is guaranteed to NOT return any of the test
+	 * values.
+	 */
+	version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
+
+	mask = 0;  /* do not swap */
+	if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
+		priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
+	} else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
+		/* read back byte-swapped, so complement byte swap bits */
+		mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
+		priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
+	} else {
+		/* So it wasn't an EIP97 ... maybe it's an EIP197? */
+		version = readl(priv->base + EIP197_HIA_AIC_BASE +
+				EIP197_HIA_VERSION);
+		if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
+			priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
+			priv->flags |= SAFEXCEL_HW_EIP197;
+		} else if (EIP197_REG_HI16(version) ==
+			   EIP197_HIA_VERSION_BE) {
+			/* read back byte-swapped, so complement swap bits */
+			mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
+			priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
+			priv->flags |= SAFEXCEL_HW_EIP197;
+		} else {
+			return -ENODEV;
+		}
+	}
+
+	/* Now initialize the reg offsets based on the probing info so far */
+	safexcel_init_register_offsets(priv);
+
+	/*
+	 * If the version was read byte-swapped, we need to flip the device
+	 * swapping. Keep in mind here, though, that what we write will also be
+	 * byte-swapped ...
+	 */
+	if (mask) {
+		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+		val = val ^ (mask >> 24); /* toggle byte swap bits */
+		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+	}
+
+	/*
+	 * We're not done probing yet! We may fall through to here if no HIA
+	 * was found at all. So, with the endianness presumably correct now and
+	 * the offsets set up, *really* probe for the EIP97/EIP197.
+	 */
+	version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
+	if (((priv->flags & SAFEXCEL_HW_EIP197) &&
+	     (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
+	    ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
+	     (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
+		/*
+		 * We did not find the device that matched our initial probing
+		 * (or our initial probing failed). Report an appropriate error.
+		 */
+		return -ENODEV;
+	}
+
+	priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
+	hwctg = version >> 28;
+	peid = version & 255;
+
+	/* Detect EIP96 packet engine and version */
+	version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
+	if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
+		dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
+		return -ENODEV;
+	}
+	priv->hwconfig.pever = EIP197_VERSION_MASK(version);
+
+	hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
+
+	if (priv->flags & SAFEXCEL_HW_EIP197) {
+		/* EIP197 */
+		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
+					  EIP197_HWDATAW_MASK;
+		priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
+					   EIP197_CFSIZE_MASK) +
+					  EIP197_CFSIZE_ADJUST;
+		priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
+					   EIP197_RFSIZE_MASK) +
+					  EIP197_RFSIZE_ADJUST;
+	} else {
+		/* EIP97 */
+		priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) &
+					  EIP97_HWDATAW_MASK;
+		priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
+					  EIP97_CFSIZE_MASK;
+		priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
+					  EIP97_RFSIZE_MASK;
+	}
+
+	/* Get supported algorithms from EIP96 transform engine */
+	priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
+				    EIP197_PE_EIP96_OPTIONS(0));
+
+	/* Print single info line describing what we just detected */
+	dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
+		 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
+		 priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
+		 priv->hwconfig.hwrfsize, priv->hwconfig.pever,
+		 priv->hwconfig.algo_flags);
+
+	safexcel_configure(priv);
+
+	if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
+		/*
+		 * Request MSI vectors for global + 1 per ring -
+		 * or just 1 for older dev images
+		 */
+		struct pci_dev *pci_pdev = pdev;
+
+		ret = pci_alloc_irq_vectors(pci_pdev,
+					    priv->config.rings + 1,
+					    priv->config.rings + 1,
+					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
+		if (ret < 0) {
+			dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
+			return ret;
+		}
+	}
+
+	/* Register the ring IRQ handlers and configure the rings */
+	priv->ring = devm_kcalloc(dev, priv->config.rings,
+				  sizeof(*priv->ring),
+				  GFP_KERNEL);
+	if (!priv->ring)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		char wq_name[9] = {0};
+		int irq;
+		struct safexcel_ring_irq_data *ring_irq;
+
+		ret = safexcel_init_ring_descriptors(priv,
+						     &priv->ring[i].cdr,
+						     &priv->ring[i].rdr);
+		if (ret) {
+			dev_err(dev, "Failed to initialize rings\n");
+			goto err_cleanup_rings;
+		}
+
+		priv->ring[i].rdr_req = devm_kcalloc(dev,
+			EIP197_DEFAULT_RING_SIZE,
+			sizeof(*priv->ring[i].rdr_req),
+			GFP_KERNEL);
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
+
+		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
+		if (!ring_irq) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
+
+		ring_irq->priv = priv;
+		ring_irq->ring = i;
+
+		irq = safexcel_request_ring_irq(pdev,
+						EIP197_IRQ_NUMBER(i, is_pci_dev),
+						is_pci_dev,
+						i,
+						safexcel_irq_ring,
+						safexcel_irq_ring_thread,
+						ring_irq);
+		if (irq < 0) {
+			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
+			ret = irq;
+			goto err_cleanup_rings;
+		}
+
+		priv->ring[i].irq = irq;
+		priv->ring[i].work_data.priv = priv;
+		priv->ring[i].work_data.ring = i;
+		INIT_WORK(&priv->ring[i].work_data.work,
+			  safexcel_dequeue_work);
+
+		snprintf(wq_name, 9, "wq_ring%d", i);
+		priv->ring[i].workqueue =
+			create_singlethread_workqueue(wq_name);
+		if (!priv->ring[i].workqueue) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
+
+		priv->ring[i].requests = 0;
+		priv->ring[i].busy = false;
+
+		crypto_init_queue(&priv->ring[i].queue,
+				  EIP197_DEFAULT_RING_SIZE);
+
+		spin_lock_init(&priv->ring[i].lock);
+		spin_lock_init(&priv->ring[i].queue_lock);
+	}
+
+	atomic_set(&priv->ring_used, 0);
+
+	ret = safexcel_hw_init(priv);
+	if (ret) {
+		dev_err(dev, "HW init failed (%d)\n", ret);
+		goto err_cleanup_rings;
+	}
+
+	ret = safexcel_register_algorithms(priv);
+	if (ret) {
+		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
+		goto err_cleanup_rings;
+	}
+
+	return 0;
+
+err_cleanup_rings:
+	for (i = 0; i < priv->config.rings; i++) {
+		if (priv->ring[i].irq)
+			irq_set_affinity_hint(priv->ring[i].irq, NULL);
+		if (priv->ring[i].workqueue)
+			destroy_workqueue(priv->ring[i].workqueue);
+	}
+
+	return ret;
+}
+
+static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->config.rings; i++) {
+		/* clear any pending interrupt */
+		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
+		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
+
+		/* Reset the CDR base address */
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+		/* Reset the RDR base address */
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+	}
+}
+
+#if IS_ENABLED(CONFIG_OF)
+/* for Device Tree platform driver */
+
+static int safexcel_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct safexcel_crypto_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "failed to get resource\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	ret = PTR_ERR_OR_ZERO(priv->clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			return ret;
+
+		ret = clk_prepare_enable(priv->clk);
+		if (ret) {
+			dev_err(dev, "unable to enable clk (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
+	/* The clock isn't mandatory */
+	if (ret != -ENOENT) {
+		if (ret)
+			goto err_core_clk;
+
+		ret = clk_prepare_enable(priv->reg_clk);
+		if (ret) {
+			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
+			goto err_core_clk;
+		}
+	}
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret)
+		goto err_reg_clk;
+
+	/* Generic EIP97/EIP197 device probing */
+	ret = safexcel_probe_generic(pdev, priv, 0);
+	if (ret)
+		goto err_reg_clk;
+
+	return 0;
+
+err_reg_clk:
+	clk_disable_unprepare(priv->reg_clk);
+err_core_clk:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static int safexcel_remove(struct platform_device *pdev)
+{
+	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
+	int i;
+
+	safexcel_unregister_algorithms(priv);
+	safexcel_hw_reset_rings(priv);
+
+	clk_disable_unprepare(priv->clk);
+
+	for (i = 0; i < priv->config.rings; i++) {
+		irq_set_affinity_hint(priv->ring[i].irq, NULL);
+		destroy_workqueue(priv->ring[i].workqueue);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id safexcel_of_match_table[] = {
+	{
+		.compatible = "inside-secure,safexcel-eip97ies",
+		.data = (void *)EIP97IES_MRVL,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197b",
+		.data = (void *)EIP197B_MRVL,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197d",
+		.data = (void *)EIP197D_MRVL,
+	},
+	/* For backward compatibility and intended for generic use */
+	{
+		.compatible = "inside-secure,safexcel-eip97",
+		.data = (void *)EIP97IES_MRVL,
+	},
+	{
+		.compatible = "inside-secure,safexcel-eip197",
+		.data = (void *)EIP197B_MRVL,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
+
+static struct platform_driver  crypto_safexcel = {
+	.probe		= safexcel_probe,
+	.remove		= safexcel_remove,
+	.driver		= {
+		.name	= "crypto-safexcel",
+		.of_match_table = safexcel_of_match_table,
+	},
+};
+#endif
+
+#if IS_ENABLED(CONFIG_PCI)
+/* PCIE devices - i.e. Inside Secure development boards */
+
+static int safexcel_pci_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *ent)
+{
+	struct device *dev = &pdev->dev;
+	struct safexcel_crypto_priv *priv;
+	void __iomem *pciebase;
+	int rc;
+	u32 val;
+
+	dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
+		ent->vendor, ent->device, ent->subvendor,
+		ent->subdevice, ent->driver_data);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->version = (enum safexcel_eip_version)ent->driver_data;
+
+	pci_set_drvdata(pdev, priv);
+
+	/* enable the device */
+	rc = pcim_enable_device(pdev);
+	if (rc) {
+		dev_err(dev, "Failed to enable PCI device\n");
+		return rc;
+	}
+
+	/* take ownership of PCI BAR0 */
+	rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
+	if (rc) {
+		dev_err(dev, "Failed to map IO region for BAR0\n");
+		return rc;
+	}
+	priv->base = pcim_iomap_table(pdev)[0];
+
+	if (priv->version == EIP197_DEVBRD) {
+		dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
+
+		rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
+		if (rc) {
+			dev_err(dev, "Failed to map IO region for BAR4\n");
+			return rc;
+		}
+
+		pciebase = pcim_iomap_table(pdev)[2];
+		val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
+		if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
+			dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
+				(val & 0xff));
+
+			/* Setup MSI identity map mapping */
+			writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
+			writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
+			writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
+			writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
+			       pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
+
+			/* Enable all device interrupts */
+			writel(GENMASK(31, 0),
+			       pciebase + EIP197_XLX_USER_INT_ENB_MSK);
+		} else {
+			dev_err(dev, "Unrecognised IRQ block identifier %x\n",
+				val);
+			return -ENODEV;
+		}
+
+		/* HW reset FPGA dev board */
+		/* assert reset */
+		writel(1, priv->base + EIP197_XLX_GPIO_BASE);
+		wmb(); /* maintain strict ordering for accesses here */
+		/* deassert reset */
+		writel(0, priv->base + EIP197_XLX_GPIO_BASE);
+		wmb(); /* maintain strict ordering for accesses here */
+	}
+
+	/* enable bus mastering */
+	pci_set_master(pdev);
+
+	/* Generic EIP97/EIP197 device probing */
+	rc = safexcel_probe_generic(pdev, priv, 1);
+	return rc;
+}
+
+void safexcel_pci_remove(struct pci_dev *pdev)
+{
+	struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
+	int i;
+
+	safexcel_unregister_algorithms(priv);
+
+	for (i = 0; i < priv->config.rings; i++)
+		destroy_workqueue(priv->ring[i].workqueue);
+
+	safexcel_hw_reset_rings(priv);
+}
+
+static const struct pci_device_id safexcel_pci_ids[] = {
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
+			       0x16ae, 0xc522),
+		.driver_data = EIP197_DEVBRD,
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
+
+static struct pci_driver safexcel_pci_driver = {
+	.name          = "crypto-safexcel",
+	.id_table      = safexcel_pci_ids,
+	.probe         = safexcel_pci_probe,
+	.remove        = safexcel_pci_remove,
+};
+#endif
+
+/* Unfortunately, we have to resort to global variables here */
+#if IS_ENABLED(CONFIG_PCI)
+int pcireg_rc = -EINVAL; /* Default safe value */
+#endif
+#if IS_ENABLED(CONFIG_OF)
+int ofreg_rc = -EINVAL; /* Default safe value */
+#endif
+
+static int __init safexcel_init(void)
+{
+#if IS_ENABLED(CONFIG_PCI)
+	/* Register PCI driver */
+	pcireg_rc = pci_register_driver(&safexcel_pci_driver);
+#endif
+
+#if IS_ENABLED(CONFIG_OF)
+	/* Register platform driver */
+	ofreg_rc = platform_driver_register(&crypto_safexcel);
+ #if IS_ENABLED(CONFIG_PCI)
+	/* Return success if either PCI or OF registered OK */
+	return pcireg_rc ? ofreg_rc : 0;
+ #else
+	return ofreg_rc;
+ #endif
+#else
+ #if IS_ENABLED(CONFIG_PCI)
+	return pcireg_rc;
+ #else
+	return -EINVAL;
+ #endif
+#endif
+}
+
+static void __exit safexcel_exit(void)
+{
+#if IS_ENABLED(CONFIG_OF)
+	/* Unregister platform driver */
+	if (!ofreg_rc)
+		platform_driver_unregister(&crypto_safexcel);
+#endif
+
+#if IS_ENABLED(CONFIG_PCI)
+	/* Unregister PCI driver if successfully registered before */
+	if (!pcireg_rc)
+		pci_unregister_driver(&safexcel_pci_driver);
+#endif
+}
+
+module_init(safexcel_init);
+module_exit(safexcel_exit);
+
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
+MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
+MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/crypto/inside-secure/safexcel.h b/marvell/linux/drivers/crypto/inside-secure/safexcel.h
new file mode 100644
index 0000000..6a4d7f0
--- /dev/null
+++ b/marvell/linux/drivers/crypto/inside-secure/safexcel.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#ifndef __SAFEXCEL_H__
+#define __SAFEXCEL_H__
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+
+#define EIP197_HIA_VERSION_BE			0xca35
+#define EIP197_HIA_VERSION_LE			0x35ca
+#define EIP97_VERSION_LE			0x9e61
+#define EIP197_VERSION_LE			0x3ac5
+#define EIP96_VERSION_LE			0x9f60
+#define EIP197_REG_LO16(reg)			(reg & 0xffff)
+#define EIP197_REG_HI16(reg)			((reg >> 16) & 0xffff)
+#define EIP197_VERSION_MASK(reg)		((reg >> 16) & 0xfff)
+#define EIP197_VERSION_SWAP(reg)		(((reg & 0xf0) << 4) | \
+						((reg >> 4) & 0xf0) | \
+						((reg >> 12) & 0xf))
+
+/* Static configuration */
+#define EIP197_DEFAULT_RING_SIZE		400
+#define EIP197_MAX_TOKENS			18
+#define EIP197_MAX_RINGS			4
+#define EIP197_FETCH_DEPTH			2
+#define EIP197_MAX_BATCH_SZ			64
+
+#define EIP197_GFP_FLAGS(base)	((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
+				 GFP_KERNEL : GFP_ATOMIC)
+
+/* Custom on-stack requests (for invalidation) */
+#define EIP197_SKCIPHER_REQ_SIZE	sizeof(struct skcipher_request) + \
+					sizeof(struct safexcel_cipher_req)
+#define EIP197_AHASH_REQ_SIZE		sizeof(struct ahash_request) + \
+					sizeof(struct safexcel_ahash_req)
+#define EIP197_AEAD_REQ_SIZE		sizeof(struct aead_request) + \
+					sizeof(struct safexcel_cipher_req)
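+/*
+ * Build a crypto API request plus the driver's per-request context on the
+ * stack, properly aligned; used by the *_exit_inv() handlers to issue
+ * context invalidation requests without allocating memory.
+ */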
+#define EIP197_REQUEST_ON_STACK(name, type, size) \
+	char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \
+	struct type##_request *name = (void *)__##name##_desc
+
+/* Xilinx dev board base offsets */
+#define EIP197_XLX_GPIO_BASE		0x200000
+#define EIP197_XLX_IRQ_BLOCK_ID_ADDR	0x2000
+#define EIP197_XLX_IRQ_BLOCK_ID_VALUE	0x1fc2
+#define EIP197_XLX_USER_INT_ENB_MSK	0x2004
+#define EIP197_XLX_USER_INT_ENB_SET	0x2008
+#define EIP197_XLX_USER_INT_ENB_CLEAR	0x200c
+#define EIP197_XLX_USER_INT_BLOCK	0x2040
+#define EIP197_XLX_USER_INT_PEND	0x2048
+#define EIP197_XLX_USER_VECT_LUT0_ADDR	0x2080
+#define EIP197_XLX_USER_VECT_LUT0_IDENT	0x03020100
+#define EIP197_XLX_USER_VECT_LUT1_ADDR	0x2084
+#define EIP197_XLX_USER_VECT_LUT1_IDENT	0x07060504
+#define EIP197_XLX_USER_VECT_LUT2_ADDR	0x2088
+#define EIP197_XLX_USER_VECT_LUT2_IDENT	0x0b0a0908
+#define EIP197_XLX_USER_VECT_LUT3_ADDR	0x208c
+#define EIP197_XLX_USER_VECT_LUT3_IDENT	0x0f0e0d0c
+
+/* Helper defines for probe function */
+#define EIP197_IRQ_NUMBER(i, is_pci)	((i) + (is_pci))
+
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv)		((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv)		((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv)		((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv)	((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv)		((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv)	((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv)		((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv)	((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv)	((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv)			((priv)->base + (priv)->offsets.pe)
+#define EIP197_GLOBAL(priv)		((priv)->base + (priv)->offsets.global)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE		0x90000
+#define EIP197_HIA_AIC_G_BASE		0x90000
+#define EIP197_HIA_AIC_R_BASE		0x90800
+#define EIP197_HIA_AIC_xDR_BASE		0x80000
+#define EIP197_HIA_DFE_BASE		0x8c000
+#define EIP197_HIA_DFE_THR_BASE		0x8c040
+#define EIP197_HIA_DSE_BASE		0x8d000
+#define EIP197_HIA_DSE_THR_BASE		0x8d040
+#define EIP197_HIA_GEN_CFG_BASE		0xf0000
+#define EIP197_PE_BASE			0xa0000
+#define EIP197_GLOBAL_BASE		0xf0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE		0x0
+#define EIP97_HIA_AIC_G_BASE		0x0
+#define EIP97_HIA_AIC_R_BASE		0x0
+#define EIP97_HIA_AIC_xDR_BASE		0x0
+#define EIP97_HIA_DFE_BASE		0xf000
+#define EIP97_HIA_DFE_THR_BASE		0xf200
+#define EIP97_HIA_DSE_BASE		0xf400
+#define EIP97_HIA_DSE_THR_BASE		0xf600
+#define EIP97_HIA_GEN_CFG_BASE		0x10000
+#define EIP97_PE_BASE			0x10000
+#define EIP97_GLOBAL_BASE		0x10000
+
+/* CDR/RDR register offsets */
+#define EIP197_HIA_xDR_OFF(priv, r)		(EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r)			(EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO	0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI	0x0004
+#define EIP197_HIA_xDR_RING_SIZE		0x0018
+#define EIP197_HIA_xDR_DESC_SIZE		0x001c
+#define EIP197_HIA_xDR_CFG			0x0020
+#define EIP197_HIA_xDR_DMA_CFG			0x0024
+#define EIP197_HIA_xDR_THRESH			0x0028
+#define EIP197_HIA_xDR_PREP_COUNT		0x002c
+#define EIP197_HIA_xDR_PROC_COUNT		0x0030
+#define EIP197_HIA_xDR_PREP_PNTR		0x0034
+#define EIP197_HIA_xDR_PROC_PNTR		0x0038
+#define EIP197_HIA_xDR_STAT			0x003c
+
+/* register offsets */
+#define EIP197_HIA_DFE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DFE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_DSE_CFG(n)			(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_CTRL(n)		(0x0000 + (128 * (n)))
+#define EIP197_HIA_DSE_THR_STAT(n)		(0x0004 + (128 * (n)))
+#define EIP197_HIA_RA_PE_CTRL(n)		(0x0010 + (8   * (n)))
+#define EIP197_HIA_RA_PE_STAT			0x0014
+#define EIP197_HIA_AIC_R_OFF(r)			((r) * 0x1000)
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r)		(0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r)	(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r)			(0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r)		(0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL		0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT		0xf810
+#define EIP197_HIA_AIC_G_ACK			0xf810
+#define EIP197_HIA_MST_CTRL			0xfff4
+#define EIP197_HIA_OPTIONS			0xfff8
+#define EIP197_HIA_VERSION			0xfffc
+#define EIP197_PE_IN_DBUF_THRES(n)		(0x0000 + (0x2000 * (n)))
+#define EIP197_PE_IN_TBUF_THRES(n)		(0x0100 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_RAM(n)		(0x0800 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUE_CTRL(n)		(0x0c80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PUTF_CTRL(n)		(0x0d00 + (0x2000 * (n)))
+#define EIP197_PE_ICE_SCRATCH_CTRL(n)		(0x0d04 + (0x2000 * (n)))
+#define EIP197_PE_ICE_FPP_CTRL(n)		(0x0d80 + (0x2000 * (n)))
+#define EIP197_PE_ICE_PPTF_CTRL(n)		(0x0e00 + (0x2000 * (n)))
+#define EIP197_PE_ICE_RAM_CTRL(n)		(0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_TOKEN_CTRL(n)		(0x1000 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION_EN(n)		(0x1004 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_CTRL(n)		(0x1008 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_CONTEXT_STAT(n)		(0x100c + (0x2000 * (n)))
+#define EIP197_PE_EIP96_FUNCTION2_EN(n)		(0x1030 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_OPTIONS(n)		(0x13f8 + (0x2000 * (n)))
+#define EIP197_PE_EIP96_VERSION(n)		(0x13fc + (0x2000 * (n)))
+#define EIP197_PE_OUT_DBUF_THRES(n)		(0x1c00 + (0x2000 * (n)))
+#define EIP197_PE_OUT_TBUF_THRES(n)		(0x1d00 + (0x2000 * (n)))
+#define EIP197_MST_CTRL				0xfff4
+#define EIP197_VERSION				0xfffc
+
+/* EIP197-specific registers, no indirection */
+#define EIP197_CLASSIFICATION_RAMS		0xe0000
+#define EIP197_TRC_CTRL				0xf0800
+#define EIP197_TRC_LASTRES			0xf0804
+#define EIP197_TRC_REGINDEX			0xf0808
+#define EIP197_TRC_PARAMS			0xf0820
+#define EIP197_TRC_FREECHAIN			0xf0824
+#define EIP197_TRC_PARAMS2			0xf0828
+#define EIP197_TRC_ECCCTRL			0xf0830
+#define EIP197_TRC_ECCSTAT			0xf0834
+#define EIP197_TRC_ECCADMINSTAT			0xf0838
+#define EIP197_TRC_ECCDATASTAT			0xf083c
+#define EIP197_TRC_ECCDATA			0xf0840
+#define EIP197_FLUE_CACHEBASE_LO(n)		(0xf6000 + (32 * (n)))
+#define EIP197_FLUE_CACHEBASE_HI(n)		(0xf6004 + (32 * (n)))
+#define EIP197_FLUE_CONFIG(n)			(0xf6010 + (32 * (n)))
+#define EIP197_FLUE_OFFSETS			0xf6808
+#define EIP197_FLUE_ARC4_OFFSET			0xf680c
+#define EIP197_FLUE_IFC_LUT(n)			(0xf6820 + (4 * (n)))
+#define EIP197_CS_RAM_CTRL			0xf7ff0
+
+/* EIP197_HIA_xDR_DESC_SIZE */
+#define EIP197_xDR_DESC_MODE_64BIT		BIT(31)
+
+/* EIP197_HIA_xDR_DMA_CFG */
+#define EIP197_HIA_xDR_WR_RES_BUF		BIT(22)
+#define EIP197_HIA_xDR_WR_CTRL_BUF		BIT(23)
+#define EIP197_HIA_xDR_WR_OWN_BUF		BIT(24)
+#define EIP197_HIA_xDR_CFG_WR_CACHE(n)		(((n) & 0x7) << 25)
+#define EIP197_HIA_xDR_CFG_RD_CACHE(n)		(((n) & 0x7) << 29)
+
+/* EIP197_HIA_CDR_THRESH */
+#define EIP197_HIA_CDR_THRESH_PROC_PKT(n)	(n)
+#define EIP197_HIA_CDR_THRESH_PROC_MODE		BIT(22)
+#define EIP197_HIA_CDR_THRESH_PKT_MODE		BIT(23)
+#define EIP197_HIA_CDR_THRESH_TIMEOUT(n)	((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_RDR_THRESH */
+#define EIP197_HIA_RDR_THRESH_PROC_PKT(n)	(n)
+#define EIP197_HIA_RDR_THRESH_PKT_MODE		BIT(23)
+#define EIP197_HIA_RDR_THRESH_TIMEOUT(n)	((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_xDR_PREP_COUNT */
+#define EIP197_xDR_PREP_CLR_COUNT		BIT(31)
+
+/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET		24
+#define EIP197_xDR_PROC_xD_PKT_MASK		GENMASK(6, 0)
+#define EIP197_xDR_PROC_xD_COUNT(n)		((n) << 2)
+#define EIP197_xDR_PROC_xD_PKT(n)		((n) << 24)
+#define EIP197_xDR_PROC_CLR_COUNT		BIT(31)
+
+/* EIP197_HIA_xDR_STAT */
+#define EIP197_xDR_DMA_ERR			BIT(0)
+#define EIP197_xDR_PREP_CMD_THRES		BIT(1)
+#define EIP197_xDR_ERR				BIT(2)
+#define EIP197_xDR_THRESH			BIT(4)
+#define EIP197_xDR_TIMEOUT			BIT(5)
+
+#define EIP197_HIA_RA_PE_CTRL_RESET		BIT(31)
+#define EIP197_HIA_RA_PE_CTRL_EN		BIT(30)
+
+/* EIP197_HIA_OPTIONS */
+#define EIP197_N_PES_OFFSET			4
+#define EIP197_N_PES_MASK			GENMASK(4, 0)
+#define EIP97_N_PES_MASK			GENMASK(2, 0)
+#define EIP197_HWDATAW_OFFSET			25
+#define EIP197_HWDATAW_MASK			GENMASK(3, 0)
+#define EIP97_HWDATAW_MASK			GENMASK(2, 0)
+#define EIP197_CFSIZE_OFFSET			9
+#define EIP197_CFSIZE_ADJUST			4
+#define EIP97_CFSIZE_OFFSET			8
+#define EIP197_CFSIZE_MASK			GENMASK(3, 0)
+#define EIP97_CFSIZE_MASK			GENMASK(4, 0)
+#define EIP197_RFSIZE_OFFSET			12
+#define EIP197_RFSIZE_ADJUST			4
+#define EIP97_RFSIZE_OFFSET			12
+#define EIP197_RFSIZE_MASK			GENMASK(3, 0)
+#define EIP97_RFSIZE_MASK			GENMASK(4, 0)
+
+/* EIP197_HIA_AIC_R_ENABLE_CTRL */
+#define EIP197_CDR_IRQ(n)			BIT((n) * 2)
+#define EIP197_RDR_IRQ(n)			BIT((n) * 2 + 1)
+
+/* EIP197_HIA_DFE/DSE_CFG */
+#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n)	((n) << 0)
+#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n)	(((n) & 0x7) << 4)
+#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n)	((n) << 8)
+#define EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE	GENMASK(15, 14)
+#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n)	((n) << 16)
+#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n)	(((n) & 0x7) << 20)
+#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n)	((n) << 24)
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG		(BIT(31) | BIT(29))
+#define EIP197_HIA_DSE_CFG_EN_SINGLE_WR		BIT(29)
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG		BIT(31)
+
+/* EIP197_HIA_DFE/DSE_THR_CTRL */
+#define EIP197_DxE_THR_CTRL_EN			BIT(30)
+#define EIP197_DxE_THR_CTRL_RESET_PE		BIT(31)
+
+/* EIP197_PE_ICE_PUE/FPP_CTRL */
+#define EIP197_PE_ICE_UENG_START_OFFSET(n)	((n) << 16)
+#define EIP197_PE_ICE_UENG_INIT_ALIGN_MASK	0x7ff0
+#define EIP197_PE_ICE_UENG_DEBUG_RESET		BIT(3)
+
+/* EIP197_HIA_AIC_G_ENABLED_STAT */
+#define EIP197_G_IRQ_DFE(n)			BIT((n) << 1)
+#define EIP197_G_IRQ_DSE(n)			BIT(((n) << 1) + 1)
+#define EIP197_G_IRQ_RING			BIT(16)
+#define EIP197_G_IRQ_PE(n)			BIT((n) + 20)
+
+/* EIP197_HIA_MST_CTRL */
+#define RD_CACHE_3BITS				0x5
+#define WR_CACHE_3BITS				0x3
+#define RD_CACHE_4BITS				(RD_CACHE_3BITS << 1 | BIT(0))
+#define WR_CACHE_4BITS				(WR_CACHE_3BITS << 1 | BIT(0))
+#define EIP197_MST_CTRL_RD_CACHE(n)		(((n) & 0xf) << 0)
+#define EIP197_MST_CTRL_WD_CACHE(n)		(((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_TX_MAX_CMD(n)		(((n) & 0xf) << 20)
+#define EIP197_MST_CTRL_BYTE_SWAP		BIT(24)
+#define EIP197_MST_CTRL_NO_BYTE_SWAP		BIT(25)
+#define EIP197_MST_CTRL_BYTE_SWAP_BITS          GENMASK(25, 24)
+
+/* EIP197_PE_IN_DBUF/TBUF_THRES */
+#define EIP197_PE_IN_xBUF_THRES_MIN(n)		((n) << 8)
+#define EIP197_PE_IN_xBUF_THRES_MAX(n)		((n) << 12)
+
+/* EIP197_PE_OUT_DBUF_THRES */
+#define EIP197_PE_OUT_DBUF_THRES_MIN(n)		((n) << 0)
+#define EIP197_PE_OUT_DBUF_THRES_MAX(n)		((n) << 4)
+
+/* EIP197_PE_ICE_SCRATCH_CTRL */
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER		BIT(2)
+#define EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN		BIT(3)
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS	BIT(24)
+#define EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS	BIT(25)
+
+/* EIP197_PE_ICE_SCRATCH_RAM */
+#define EIP197_NUM_OF_SCRATCH_BLOCKS		32
+
+/* EIP197_PE_ICE_PUE/FPP_CTRL */
+#define EIP197_PE_ICE_x_CTRL_SW_RESET			BIT(0)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR		BIT(14)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR		BIT(15)
+
+/* EIP197_PE_ICE_RAM_CTRL */
+#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN	BIT(0)
+#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN	BIT(1)
+
+/* EIP197_PE_EIP96_TOKEN_CTRL */
+#define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES		BIT(16)
+#define EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT	BIT(17)
+#define EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT	BIT(22)
+
+/* EIP197_PE_EIP96_FUNCTION_EN */
+#define EIP197_FUNCTION_ALL			0xffffffff
+
+/* EIP197_PE_EIP96_CONTEXT_CTRL */
+#define EIP197_CONTEXT_SIZE(n)			(n)
+#define EIP197_ADDRESS_MODE			BIT(8)
+#define EIP197_CONTROL_MODE			BIT(9)
+
+/* EIP197_FLUE_CONFIG */
+#define EIP197_FLUE_CONFIG_MAGIC		0xc7000004
+
+/* Context Control */
+struct safexcel_context_record {
+	u32 control0;
+	u32 control1;
+
+	__le32 data[40];
+} __packed;
+
+/* control0 */
+#define CONTEXT_CONTROL_TYPE_NULL_OUT		0x0
+#define CONTEXT_CONTROL_TYPE_NULL_IN		0x1
+#define CONTEXT_CONTROL_TYPE_HASH_OUT		0x2
+#define CONTEXT_CONTROL_TYPE_HASH_IN		0x3
+#define CONTEXT_CONTROL_TYPE_CRYPTO_OUT		0x4
+#define CONTEXT_CONTROL_TYPE_CRYPTO_IN		0x5
+#define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT	0x6
+#define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN	0x7
+#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT	0xe
+#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN	0xf
+#define CONTEXT_CONTROL_RESTART_HASH		BIT(4)
+#define CONTEXT_CONTROL_NO_FINISH_HASH		BIT(5)
+#define CONTEXT_CONTROL_SIZE(n)			((n) << 8)
+#define CONTEXT_CONTROL_KEY_EN			BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_DES		(0x0 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_3DES		(0x2 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES128	(0x5 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES192	(0x6 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES256	(0x7 << 17)
+#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED	(0x1 << 21)
+#define CONTEXT_CONTROL_DIGEST_XCM		(0x2 << 21)
+#define CONTEXT_CONTROL_DIGEST_HMAC		(0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_MD5		(0x0 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1		(0x2 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224	(0x4 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256	(0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384	(0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512	(0x5 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_GHASH	(0x4 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128	(0x1 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192	(0x2 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256	(0x3 << 23)
+#define CONTEXT_CONTROL_INV_FR			(0x5 << 24)
+#define CONTEXT_CONTROL_INV_TR			(0x6 << 24)
+
+/* control1 */
+#define CONTEXT_CONTROL_CRYPTO_MODE_ECB		(0 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_CBC		(1 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_OFB		(4 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_CFB		(5 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD	(6 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_XTS		(7 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_XCM		((6 << 0) | BIT(17))
+#define CONTEXT_CONTROL_IV0			BIT(5)
+#define CONTEXT_CONTROL_IV1			BIT(6)
+#define CONTEXT_CONTROL_IV2			BIT(7)
+#define CONTEXT_CONTROL_IV3			BIT(8)
+#define CONTEXT_CONTROL_DIGEST_CNT		BIT(9)
+#define CONTEXT_CONTROL_COUNTER_MODE		BIT(10)
+#define CONTEXT_CONTROL_CRYPTO_STORE		BIT(12)
+#define CONTEXT_CONTROL_HASH_STORE		BIT(19)
+
+#define EIP197_XCM_MODE_GCM			1
+#define EIP197_XCM_MODE_CCM			2
+
+/* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+#define EIP197_COUNTER_BLOCK_SIZE		64
+
+/* EIP197_CS_RAM_CTRL */
+#define EIP197_TRC_ENABLE_0			BIT(4)
+#define EIP197_TRC_ENABLE_1			BIT(5)
+#define EIP197_TRC_ENABLE_2			BIT(6)
+#define EIP197_TRC_ENABLE_MASK			GENMASK(6, 4)
+#define EIP197_CS_BANKSEL_MASK			GENMASK(14, 12)
+#define EIP197_CS_BANKSEL_OFS			12
+
+/* EIP197_TRC_PARAMS */
+#define EIP197_TRC_PARAMS_SW_RESET		BIT(0)
+#define EIP197_TRC_PARAMS_DATA_ACCESS		BIT(2)
+#define EIP197_TRC_PARAMS_HTABLE_SZ(x)		((x) << 4)
+#define EIP197_TRC_PARAMS_BLK_TIMER_SPEED(x)	((x) << 10)
+#define EIP197_TRC_PARAMS_RC_SZ_LARGE(n)	((n) << 18)
+
+/* EIP197_TRC_FREECHAIN */
+#define EIP197_TRC_FREECHAIN_HEAD_PTR(p)	(p)
+#define EIP197_TRC_FREECHAIN_TAIL_PTR(p)	((p) << 16)
+
+/* EIP197_TRC_PARAMS2 */
+#define EIP197_TRC_PARAMS2_HTABLE_PTR(p)	(p)
+#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n)	((n) << 18)
+
+/* Cache helpers */
+#define EIP197_CS_TRC_REC_WC			64
+#define EIP197_CS_RC_SIZE			(4 * sizeof(u32))
+#define EIP197_CS_RC_NEXT(x)			(x)
+#define EIP197_CS_RC_PREV(x)			((x) << 10)
+#define EIP197_RC_NULL				0x3ff
+
+/* Result data */
+struct result_data_desc {
+	u32 packet_length:17;
+	u32 error_code:15;
+
+	u8 bypass_length:4;
+	u8 e15:1;
+	u16 rsvd0;
+	u8 hash_bytes:1;
+	u8 hash_length:6;
+	u8 generic_bytes:1;
+	u8 checksum:1;
+	u8 next_header:1;
+	u8 length:1;
+
+	u16 application_id;
+	u16 rsvd1;
+
+	u32 rsvd2;
+} __packed;
+
+/* Basic Result Descriptor format */
+struct safexcel_result_desc {
+	u32 particle_size:17;
+	u8 rsvd0:3;
+	u8 descriptor_overflow:1;
+	u8 buffer_overflow:1;
+	u8 last_seg:1;
+	u8 first_seg:1;
+	u16 result_size:8;
+
+	u32 rsvd1;
+
+	u32 data_lo;
+	u32 data_hi;
+
+	struct result_data_desc result_data;
+} __packed;
+
+/*
+ * The EIP(1)97 only needs to fetch the descriptor part of
+ * the result descriptor, not the result token part!
+ */
+#define EIP197_RD64_FETCH_SIZE		((sizeof(struct safexcel_result_desc) -\
+					  sizeof(struct result_data_desc)) /\
+					 sizeof(u32))
+
+struct safexcel_token {
+	u32 packet_length:17;
+	u8 stat:2;
+	u16 instructions:9;
+	u8 opcode:4;
+} __packed;
+
+#define EIP197_TOKEN_HASH_RESULT_VERIFY		BIT(16)
+
+#define EIP197_TOKEN_CTX_OFFSET(x)		(x)
+#define EIP197_TOKEN_DIRECTION_EXTERNAL		BIT(11)
+#define EIP197_TOKEN_EXEC_IF_SUCCESSFUL		(0x1 << 12)
+
+#define EIP197_TOKEN_STAT_LAST_HASH		BIT(0)
+#define EIP197_TOKEN_STAT_LAST_PACKET		BIT(1)
+#define EIP197_TOKEN_OPCODE_DIRECTION		0x0
+#define EIP197_TOKEN_OPCODE_INSERT		0x2
+#define EIP197_TOKEN_OPCODE_NOOP		EIP197_TOKEN_OPCODE_INSERT
+#define EIP197_TOKEN_OPCODE_RETRIEVE		0x4
+#define EIP197_TOKEN_OPCODE_INSERT_REMRES	0xa
+#define EIP197_TOKEN_OPCODE_VERIFY		0xd
+#define EIP197_TOKEN_OPCODE_CTX_ACCESS		0xe
+#define EIP197_TOKEN_OPCODE_BYPASS		GENMASK(3, 0)
+
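+/*
+ * Encode a NOOP in a token slot: per the opcode aliasing above this is an
+ * INSERT instruction with a 4 byte packet length and no instruction flags.
+ */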
+static inline void eip197_noop_token(struct safexcel_token *token)
+{
+	token->opcode = EIP197_TOKEN_OPCODE_NOOP;
+	token->packet_length = BIT(2);
+}
+
+/* Instructions */
+#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST	0x1c
+#define EIP197_TOKEN_INS_ORIGIN_IV0		0x14
+#define EIP197_TOKEN_INS_ORIGIN_TOKEN		0x1b
+#define EIP197_TOKEN_INS_ORIGIN_LEN(x)		((x) << 5)
+#define EIP197_TOKEN_INS_TYPE_OUTPUT		BIT(5)
+#define EIP197_TOKEN_INS_TYPE_HASH		BIT(6)
+#define EIP197_TOKEN_INS_TYPE_CRYPTO		BIT(7)
+#define EIP197_TOKEN_INS_LAST			BIT(8)
+
+/* Processing Engine Control Data  */
+struct safexcel_control_data_desc {
+	u32 packet_length:17;
+	u16 options:13;
+	u8 type:2;
+
+	u16 application_id;
+	u16 rsvd;
+
+	u8 refresh:2;
+	u32 context_lo:30;
+	u32 context_hi;
+
+	u32 control0;
+	u32 control1;
+
+	u32 token[EIP197_MAX_TOKENS];
+} __packed;
+
+#define EIP197_OPTION_MAGIC_VALUE	BIT(0)
+#define EIP197_OPTION_64BIT_CTX		BIT(1)
+#define EIP197_OPTION_RC_AUTO		(0x2 << 3)
+#define EIP197_OPTION_CTX_CTRL_IN_CMD	BIT(8)
+#define EIP197_OPTION_2_TOKEN_IV_CMD	GENMASK(11, 10)
+#define EIP197_OPTION_4_TOKEN_IV_CMD	GENMASK(11, 9)
+
+#define EIP197_TYPE_EXTENDED		0x3
+
+/* Basic Command Descriptor format */
+struct safexcel_command_desc {
+	u32 particle_size:17;
+	u8 rsvd0:5;
+	u8 last_seg:1;
+	u8 first_seg:1;
+	u16 additional_cdata_size:8;
+
+	u32 rsvd1;
+
+	u32 data_lo;
+	u32 data_hi;
+
+	struct safexcel_control_data_desc control_data;
+} __packed;
+
+/*
+ * Internal structures & functions
+ */
+
+#define EIP197_FW_TERMINAL_NOPS		2
+#define EIP197_FW_START_POLLCNT		16
+#define EIP197_FW_PUE_READY		0x14
+#define EIP197_FW_FPP_READY		0x18
+
+enum eip197_fw {
+	FW_IFPP = 0,
+	FW_IPUE,
+	FW_NB
+};
+
+struct safexcel_desc_ring {
+	void *base;
+	void *base_end;
+	dma_addr_t base_dma;
+
+	/* write and read pointers */
+	void *write;
+	void *read;
+
+	/* descriptor element offset */
+	unsigned int offset;
+};
+
+enum safexcel_alg_type {
+	SAFEXCEL_ALG_TYPE_SKCIPHER,
+	SAFEXCEL_ALG_TYPE_AEAD,
+	SAFEXCEL_ALG_TYPE_AHASH,
+};
+
+struct safexcel_config {
+	u32 pes;
+	u32 rings;
+
+	u32 cd_size;
+	u32 cd_offset;
+
+	u32 rd_size;
+	u32 rd_offset;
+};
+
+struct safexcel_work_data {
+	struct work_struct work;
+	struct safexcel_crypto_priv *priv;
+	int ring;
+};
+
+struct safexcel_ring {
+	spinlock_t lock;
+
+	struct workqueue_struct *workqueue;
+	struct safexcel_work_data work_data;
+
+	/* command/result rings */
+	struct safexcel_desc_ring cdr;
+	struct safexcel_desc_ring rdr;
+
+	/* result ring crypto API request */
+	struct crypto_async_request **rdr_req;
+
+	/* queue */
+	struct crypto_queue queue;
+	spinlock_t queue_lock;
+
+	/* Number of requests in the engine. */
+	int requests;
+
+	/* The ring is currently handling at least one request */
+	bool busy;
+
+	/* Store for current requests when bailing out of the dequeueing
+	 * function when not enough resources are available.
+	 */
+	struct crypto_async_request *req;
+	struct crypto_async_request *backlog;
+
+	/* irq of this ring */
+	int irq;
+};
+
+/* EIP integration context flags */
+enum safexcel_eip_version {
+	/* Platform (EIP integration context) specifier */
+	EIP97IES_MRVL,
+	EIP197B_MRVL,
+	EIP197D_MRVL,
+	EIP197_DEVBRD
+};
+
+/* Priority we use for advertising our algorithms */
+#define SAFEXCEL_CRA_PRIORITY		300
+
+/* EIP algorithm presence flags */
+enum safexcel_eip_algorithms {
+	SAFEXCEL_ALG_BC0      = BIT(5),
+	SAFEXCEL_ALG_SM4      = BIT(6),
+	SAFEXCEL_ALG_SM3      = BIT(7),
+	SAFEXCEL_ALG_CHACHA20 = BIT(8),
+	SAFEXCEL_ALG_POLY1305 = BIT(9),
+	SAFEXCEL_SEQMASK_256   = BIT(10),
+	SAFEXCEL_SEQMASK_384   = BIT(11),
+	SAFEXCEL_ALG_AES      = BIT(12),
+	SAFEXCEL_ALG_AES_XFB  = BIT(13),
+	SAFEXCEL_ALG_DES      = BIT(15),
+	SAFEXCEL_ALG_DES_XFB  = BIT(16),
+	SAFEXCEL_ALG_ARC4     = BIT(18),
+	SAFEXCEL_ALG_AES_XTS  = BIT(20),
+	SAFEXCEL_ALG_WIRELESS = BIT(21),
+	SAFEXCEL_ALG_MD5      = BIT(22),
+	SAFEXCEL_ALG_SHA1     = BIT(23),
+	SAFEXCEL_ALG_SHA2_256 = BIT(25),
+	SAFEXCEL_ALG_SHA2_512 = BIT(26),
+	SAFEXCEL_ALG_XCBC_MAC = BIT(27),
+	SAFEXCEL_ALG_CBC_MAC_ALL = BIT(29),
+	SAFEXCEL_ALG_GHASH    = BIT(30),
+	SAFEXCEL_ALG_SHA3     = BIT(31),
+};
+
+struct safexcel_register_offsets {
+	u32 hia_aic;
+	u32 hia_aic_g;
+	u32 hia_aic_r;
+	u32 hia_aic_xdr;
+	u32 hia_dfe;
+	u32 hia_dfe_thr;
+	u32 hia_dse;
+	u32 hia_dse_thr;
+	u32 hia_gen_cfg;
+	u32 pe;
+	u32 global;
+};
+
+enum safexcel_flags {
+	EIP197_TRC_CACHE	= BIT(0),
+	SAFEXCEL_HW_EIP197	= BIT(1),
+};
+
+struct safexcel_hwconfig {
+	enum safexcel_eip_algorithms algo_flags;
+	int hwver;
+	int hiaver;
+	int pever;
+	int hwdataw;
+	int hwcfsize;
+	int hwrfsize;
+};
+
+struct safexcel_crypto_priv {
+	void __iomem *base;
+	struct device *dev;
+	struct clk *clk;
+	struct clk *reg_clk;
+	struct safexcel_config config;
+
+	enum safexcel_eip_version version;
+	struct safexcel_register_offsets offsets;
+	struct safexcel_hwconfig hwconfig;
+	u32 flags;
+
+	/* context DMA pool */
+	struct dma_pool *context_pool;
+
+	atomic_t ring_used;
+
+	struct safexcel_ring *ring;
+};
+
+struct safexcel_context {
+	int (*send)(struct crypto_async_request *req, int ring,
+		    int *commands, int *results);
+	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
+			     struct crypto_async_request *req, bool *complete,
+			     int *ret);
+	struct safexcel_context_record *ctxr;
+	dma_addr_t ctxr_dma;
+
+	int ring;
+	bool needs_inv;
+	bool exit_inv;
+};
+
+#define HASH_CACHE_SIZE			SHA512_BLOCK_SIZE
+
+struct safexcel_ahash_export_state {
+	u64 len;
+	u64 processed;
+
+	u32 digest;
+
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u8 cache[HASH_CACHE_SIZE];
+};
+
+/*
+ * Template structure to describe the algorithms in order to register them.
+ * It also serves to contain our private structure and is actually
+ * the only way I know of in this framework to avoid having global pointers...
+ */
+struct safexcel_alg_template {
+	struct safexcel_crypto_priv *priv;
+	enum safexcel_alg_type type;
+	enum safexcel_eip_algorithms algo_mask;
+	union {
+		struct skcipher_alg skcipher;
+		struct aead_alg aead;
+		struct ahash_alg ahash;
+	} alg;
+};
+
+struct safexcel_inv_result {
+	struct completion completion;
+	int error;
+};
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+				struct safexcel_result_desc *rdesc);
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+			      struct safexcel_crypto_priv *priv,
+			      dma_addr_t ctxr_dma, int ring);
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr);
+int safexcel_select_ring(struct safexcel_crypto_priv *priv);
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+			      struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int  ring);
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring);
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						 bool first, bool last,
+						 dma_addr_t data, u32 len,
+						 u32 full_data_len,
+						 dma_addr_t context);
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						bool first, bool last,
+						dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+				  int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+				  int ring,
+				  struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+			  int ring,
+			  struct safexcel_result_desc *rdesc,
+			  struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
+void safexcel_inv_complete(struct crypto_async_request *req, int error);
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+			 void *istate, void *ostate);
+
+/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_des;
+extern struct safexcel_alg_template safexcel_alg_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_ecb_aes;
+extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_cfb_aes;
+extern struct safexcel_alg_template safexcel_alg_ofb_aes;
+extern struct safexcel_alg_template safexcel_alg_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_md5;
+extern struct safexcel_alg_template safexcel_alg_sha1;
+extern struct safexcel_alg_template safexcel_alg_sha224;
+extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha384;
+extern struct safexcel_alg_template safexcel_alg_sha512;
+extern struct safexcel_alg_template safexcel_alg_hmac_md5;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_xts_aes;
+extern struct safexcel_alg_template safexcel_alg_gcm;
+extern struct safexcel_alg_template safexcel_alg_ccm;
+
+#endif
diff --git a/marvell/linux/drivers/crypto/inside-secure/safexcel_cipher.c b/marvell/linux/drivers/crypto/inside-secure/safexcel_cipher.c
new file mode 100644
index 0000000..ef51f8c
--- /dev/null
+++ b/marvell/linux/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -0,0 +1,2305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/des.h>
+#include <crypto/gcm.h>
+#include <crypto/ghash.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+enum safexcel_cipher_direction {
+	SAFEXCEL_ENCRYPT,
+	SAFEXCEL_DECRYPT,
+};
+
+enum safexcel_cipher_alg {
+	SAFEXCEL_DES,
+	SAFEXCEL_3DES,
+	SAFEXCEL_AES,
+};
+
+struct safexcel_cipher_ctx {
+	struct safexcel_context base;
+	struct safexcel_crypto_priv *priv;
+
+	u32 mode;
+	enum safexcel_cipher_alg alg;
+	bool aead;
+	int  xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
+
+	__le32 key[16];
+	u32 nonce;
+	unsigned int key_len, xts;
+
+	/* All the below is AEAD specific */
+	u32 hash_alg;
+	u32 state_sz;
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+
+	struct crypto_cipher *hkaes;
+};
+
+struct safexcel_cipher_req {
+	enum safexcel_cipher_direction direction;
+	/* Number of result descriptors associated with the request */
+	unsigned int rdescs;
+	bool needs_inv;
+	int  nr_src, nr_dst;
+};
+
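+/*
+ * Fill the control token words of the command descriptor with the nonce /
+ * IV / counter material required by the configured cipher mode: counter
+ * mode with nonce, GCM, CCM, or the block-size IV for the other non-ECB
+ * modes.
+ */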
+static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				  struct safexcel_command_desc *cdesc)
+{
+	u32 block_sz = 0;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+		/* 32 bit nonce */
+		cdesc->control_data.token[0] = ctx->nonce;
+		/* 64 bit IV part */
+		memcpy(&cdesc->control_data.token[1], iv, 8);
+		/* 32 bit counter, start at 1 (big endian!) */
+		cdesc->control_data.token[3] = cpu_to_be32(1);
+
+		return;
+	} else if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+		/* 96 bit IV part */
+		memcpy(&cdesc->control_data.token[0], iv, 12);
+		/* 32 bit counter, start at 1 (big endian!) */
+		cdesc->control_data.token[3] = cpu_to_be32(1);
+
+		return;
+	} else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
+		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+		/* Variable length IV part */
+		memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
+		/* Start variable length counter at 0 */
+		memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
+		       0, iv[0] + 1);
+
+		return;
+	}
+
+	if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
+		switch (ctx->alg) {
+		case SAFEXCEL_DES:
+			block_sz = DES_BLOCK_SIZE;
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+		case SAFEXCEL_3DES:
+			block_sz = DES3_EDE_BLOCK_SIZE;
+			cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+			break;
+		case SAFEXCEL_AES:
+			block_sz = AES_BLOCK_SIZE;
+			cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+			break;
+		}
+		memcpy(cdesc->control_data.token, iv, block_sz);
+	}
+}
+
+static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				    struct safexcel_command_desc *cdesc,
+				    u32 length)
+{
+	struct safexcel_token *token;
+
+	safexcel_cipher_token(ctx, iv, cdesc);
+
+	/* skip over worst case IV of 4 dwords, no need to be exact */
+	token = (struct safexcel_token *)(cdesc->control_data.token + 4);
+
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = length;
+	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
+			EIP197_TOKEN_STAT_LAST_HASH;
+	token[0].instructions = EIP197_TOKEN_INS_LAST |
+				EIP197_TOKEN_INS_TYPE_CRYPTO |
+				EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
+
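+/*
+ * Build the AEAD instruction sequence: token[6] hashes the associated data,
+ * token[10] processes the actual crypto payload, and the trailing slots
+ * either insert the computed tag (encrypt) or retrieve and verify it
+ * (decrypt). For CCM, the B0 block is additionally constructed in the first
+ * token words and both AAD and payload are padded out to the 16 byte hash
+ * block size.
+ */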
+static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+				struct safexcel_command_desc *cdesc,
+				enum safexcel_cipher_direction direction,
+				u32 cryptlen, u32 assoclen, u32 digestsize)
+{
+	struct safexcel_token *token;
+
+	safexcel_cipher_token(ctx, iv, cdesc);
+
+	if (direction == SAFEXCEL_ENCRYPT) {
+		/* align end of instruction sequence to end of token */
+		token = (struct safexcel_token *)(cdesc->control_data.token +
+			 EIP197_MAX_TOKENS - 13);
+
+		token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
+		token[12].packet_length = digestsize;
+		token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				 EIP197_TOKEN_STAT_LAST_PACKET;
+		token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+					 EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+	} else {
+		cryptlen -= digestsize;
+
+		/* align end of instruction sequence to end of token */
+		token = (struct safexcel_token *)(cdesc->control_data.token +
+			 EIP197_MAX_TOKENS - 14);
+
+		token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+		token[12].packet_length = digestsize;
+		token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				 EIP197_TOKEN_STAT_LAST_PACKET;
+		token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+
+		token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+		token[13].packet_length = digestsize |
+					  EIP197_TOKEN_HASH_RESULT_VERIFY;
+		token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
+				 EIP197_TOKEN_STAT_LAST_PACKET;
+		token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+	}
+
+	token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[6].packet_length = assoclen;
+
+	if (likely(cryptlen)) {
+		token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+		token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+		token[10].packet_length = cryptlen;
+		token[10].stat = EIP197_TOKEN_STAT_LAST_HASH;
+		token[10].instructions = EIP197_TOKEN_INS_LAST |
+					 EIP197_TOKEN_INS_TYPE_CRYPTO |
+					 EIP197_TOKEN_INS_TYPE_HASH |
+					 EIP197_TOKEN_INS_TYPE_OUTPUT;
+	} else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
+		token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+		token[6].instructions = EIP197_TOKEN_INS_LAST |
+					EIP197_TOKEN_INS_TYPE_HASH;
+	}
+
+	if (!ctx->xcm)
+		return;
+
+	token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+	token[8].packet_length = 0;
+	token[8].instructions = AES_BLOCK_SIZE;
+
+	token[9].opcode = EIP197_TOKEN_OPCODE_INSERT;
+	token[9].packet_length = AES_BLOCK_SIZE;
+	token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+				EIP197_TOKEN_INS_TYPE_CRYPTO;
+
+	if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+		token[6].instructions = EIP197_TOKEN_INS_LAST |
+					EIP197_TOKEN_INS_TYPE_HASH;
+	} else {
+		u8 *cbcmaciv = (u8 *)&token[1];
+		u32 *aadlen = (u32 *)&token[5];
+
+		/* Construct IV block B0 for the CBC-MAC */
+		token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
+		token[0].packet_length = AES_BLOCK_SIZE +
+					 ((assoclen > 0) << 1);
+		token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+					EIP197_TOKEN_INS_TYPE_HASH;
+		/* Variable length IV part */
+		memcpy(cbcmaciv, iv, 15 - iv[0]);
+		/* fixup flags byte */
+		cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
+		/* Clear upper bytes of variable message length to 0 */
+		memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+		/* insert lower 2 bytes of message length */
+		cbcmaciv[14] = cryptlen >> 8;
+		cbcmaciv[15] = cryptlen & 255;
+
+		if (assoclen) {
+			*aadlen = cpu_to_le32(cpu_to_be16(assoclen));
+			assoclen += 2;
+		}
+
+		token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+		/* Align AAD data towards hash engine */
+		token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
+		assoclen &= 15;
+		token[7].packet_length = assoclen ? 16 - assoclen : 0;
+
+		if (likely(cryptlen)) {
+			token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+			/* Align crypto data towards hash engine */
+			token[10].stat = 0;
+
+			token[11].opcode = EIP197_TOKEN_OPCODE_INSERT;
+			cryptlen &= 15;
+			token[11].packet_length = cryptlen ? 16 - cryptlen : 0;
+			token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
+			token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+		} else {
+			token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
+			token[7].instructions = EIP197_TOKEN_INS_LAST |
+						EIP197_TOKEN_INS_TYPE_HASH;
+		}
+	}
+}
+
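+/*
+ * skcipher AES setkey: expand the key, request a context record
+ * invalidation if a cached record exists and the key actually changed,
+ * then copy the key into the context in little-endian form.
+ */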
+static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
+					const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	int ret, i;
+
+	ret = aes_expandkey(&aes, key, len);
+	if (ret) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < len / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < len / sizeof(u32); i++)
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+	ctx->key_len = len;
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
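+/*
+ * AEAD (authenc) setkey: split the combined key into its encryption and
+ * authentication parts, extract the RFC3686 nonce in counter mode,
+ * precompute the HMAC ipad/opad state via safexcel_hmac_setkey() and flag
+ * a context invalidation if a cached record holds different key material.
+ */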
+static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
+				unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_ahash_export_state istate, ostate;
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_authenc_keys keys;
+	struct crypto_aes_ctx aes;
+	int err = -EINVAL;
+
+	if (crypto_authenc_extractkeys(&keys, key, len) != 0)
+		goto badkey;
+
+	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+		/* Minimum keysize is minimum AES key size + nonce size */
+		if (keys.enckeylen < (AES_MIN_KEY_SIZE +
+				      CTR_RFC3686_NONCE_SIZE))
+			goto badkey;
+		/* last 4 bytes of key are the nonce! */
+		ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
+				      CTR_RFC3686_NONCE_SIZE);
+		/* exclude the nonce here */
+		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	/* Encryption key */
+	switch (ctx->alg) {
+	case SAFEXCEL_3DES:
+		err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
+		if (unlikely(err))
+			goto badkey_expflags;
+		break;
+	case SAFEXCEL_AES:
+		err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
+		if (unlikely(err))
+			goto badkey;
+		break;
+	default:
+		dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
+		goto badkey;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+	    memcmp(ctx->key, keys.enckey, keys.enckeylen))
+		ctx->base.needs_inv = true;
+
+	/* Auth key */
+	switch (ctx->hash_alg) {
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
+		if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
+		if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
+		if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
+		if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
+		if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
+					 keys.authkeylen, &istate, &ostate))
+			goto badkey;
+		break;
+	default:
+		dev_err(priv->dev, "aead: unsupported hash algorithm\n");
+		goto badkey;
+	}
+
+	crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
+				    CRYPTO_TFM_RES_MASK);
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+	    (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
+	     memcmp(ctx->opad, ostate.state, ctx->state_sz)))
+		ctx->base.needs_inv = true;
+
+	/* Now copy the keys into the context */
+	memcpy(ctx->key, keys.enckey, keys.enckeylen);
+	ctx->key_len = keys.enckeylen;
+
+	memcpy(ctx->ipad, &istate.state, ctx->state_sz);
+	memcpy(ctx->opad, &ostate.state, ctx->state_sz);
+
+	memzero_explicit(&keys, sizeof(keys));
+	return 0;
+
+badkey:
+	crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+badkey_expflags:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+}
+
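+/*
+ * Program control0/control1 of the command descriptor: packet type based on
+ * direction and AEAD/XCM mode, key and digest enables, context size, and
+ * the cipher algorithm / AES key size.
+ */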
+static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+				    struct crypto_async_request *async,
+				    struct safexcel_cipher_req *sreq,
+				    struct safexcel_command_desc *cdesc)
+{
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ctrl_size = ctx->key_len / sizeof(u32);
+
+	cdesc->control_data.control1 = ctx->mode;
+
+	if (ctx->aead) {
+		/* Take into account the ipad+opad digests */
+		if (ctx->xcm) {
+			ctrl_size += ctx->state_sz / sizeof(u32);
+			cdesc->control_data.control0 =
+				CONTEXT_CONTROL_KEY_EN |
+				CONTEXT_CONTROL_DIGEST_XCM |
+				ctx->hash_alg |
+				CONTEXT_CONTROL_SIZE(ctrl_size);
+		} else {
+			ctrl_size += ctx->state_sz / sizeof(u32) * 2;
+			cdesc->control_data.control0 =
+				CONTEXT_CONTROL_KEY_EN |
+				CONTEXT_CONTROL_DIGEST_HMAC |
+				ctx->hash_alg |
+				CONTEXT_CONTROL_SIZE(ctrl_size);
+		}
+		if (sreq->direction == SAFEXCEL_ENCRYPT)
+			cdesc->control_data.control0 |=
+				(ctx->xcm == EIP197_XCM_MODE_CCM) ?
+					CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT :
+					CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+
+		else
+			cdesc->control_data.control0 |=
+				(ctx->xcm == EIP197_XCM_MODE_CCM) ?
+					CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN :
+					CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+	} else {
+		if (sreq->direction == SAFEXCEL_ENCRYPT)
+			cdesc->control_data.control0 =
+				CONTEXT_CONTROL_TYPE_CRYPTO_OUT |
+				CONTEXT_CONTROL_KEY_EN |
+				CONTEXT_CONTROL_SIZE(ctrl_size);
+		else
+			cdesc->control_data.control0 =
+				CONTEXT_CONTROL_TYPE_CRYPTO_IN |
+				CONTEXT_CONTROL_KEY_EN |
+				CONTEXT_CONTROL_SIZE(ctrl_size);
+	}
+
+	if (ctx->alg == SAFEXCEL_DES) {
+		cdesc->control_data.control0 |=
+			CONTEXT_CONTROL_CRYPTO_ALG_DES;
+	} else if (ctx->alg == SAFEXCEL_3DES) {
+		cdesc->control_data.control0 |=
+			CONTEXT_CONTROL_CRYPTO_ALG_3DES;
+	} else if (ctx->alg == SAFEXCEL_AES) {
+		switch (ctx->key_len >> ctx->xts) {
+		case AES_KEYSIZE_128:
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+			break;
+		default:
+			dev_err(priv->dev, "aes keysize not supported: %u\n",
+				ctx->key_len >> ctx->xts);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
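+/*
+ * Completion handler for regular cipher requests: consume the result
+ * descriptors, unmap the DMA scatterlists and, for CBC encryption, copy the
+ * last ciphertext block back into the request IV.
+ */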
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+				      struct crypto_async_request *async,
+				      struct scatterlist *src,
+				      struct scatterlist *dst,
+				      unsigned int cryptlen,
+				      struct safexcel_cipher_req *sreq,
+				      bool *should_complete, int *ret)
+{
+	struct skcipher_request *areq = skcipher_request_cast(async);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
+	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct safexcel_result_desc *rdesc;
+	int ndesc = 0;
+
+	*ret = 0;
+
+	if (unlikely(!sreq->rdescs))
+		return 0;
+
+	while (sreq->rdescs--) {
+		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+		if (IS_ERR(rdesc)) {
+			dev_err(priv->dev,
+				"cipher: result: could not retrieve the result descriptor\n");
+			*ret = PTR_ERR(rdesc);
+			break;
+		}
+
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+		ndesc++;
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (src == dst) {
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+	}
+
+	/*
+	 * Update IV in req from last crypto output word for CBC modes
+	 */
+	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
+	    (sreq->direction == SAFEXCEL_ENCRYPT)) {
+		/* For encrypt take the last output word */
+		sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv,
+				   crypto_skcipher_ivsize(skcipher),
+				   (cryptlen -
+				    crypto_skcipher_ivsize(skcipher)));
+	}
+
+	*should_complete = true;
+
+	return ndesc;
+}
+
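+/*
+ * Build the descriptor chains for one request: map src/dst, emit one
+ * command descriptor per source segment (context record and token attached
+ * to the first one) and one result descriptor per destination segment,
+ * skipping the AAD area which the engine does not write back.
+ */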
+static int safexcel_send_req(struct crypto_async_request *base, int ring,
+			     struct safexcel_cipher_req *sreq,
+			     struct scatterlist *src, struct scatterlist *dst,
+			     unsigned int cryptlen, unsigned int assoclen,
+			     unsigned int digestsize, u8 *iv, int *commands,
+			     int *results)
+{
+	struct skcipher_request *areq = skcipher_request_cast(base);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_command_desc *cdesc;
+	struct safexcel_command_desc *first_cdesc = NULL;
+	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
+	struct scatterlist *sg;
+	unsigned int totlen;
+	unsigned int totlen_src = cryptlen + assoclen;
+	unsigned int totlen_dst = totlen_src;
+	int n_cdesc = 0, n_rdesc = 0;
+	int queued, i, ret = 0;
+	bool first = true;
+
+	sreq->nr_src = sg_nents_for_len(src, totlen_src);
+
+	if (ctx->aead) {
+		/*
+		 * AEAD has auth tag appended to output for encrypt and
+		 * removed from the output for decrypt!
+		 */
+		if (sreq->direction == SAFEXCEL_DECRYPT)
+			totlen_dst -= digestsize;
+		else
+			totlen_dst += digestsize;
+
+		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+		       ctx->ipad, ctx->state_sz);
+		if (!ctx->xcm)
+			memcpy(ctx->base.ctxr->data + (ctx->key_len +
+			       ctx->state_sz) / sizeof(u32), ctx->opad,
+			       ctx->state_sz);
+	} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
+		   (sreq->direction == SAFEXCEL_DECRYPT)) {
+		/*
+		 * Save IV from last crypto input word for CBC modes in decrypt
+		 * direction. Need to do this first in case of in-place operation
+		 * as it will be overwritten.
+		 */
+		sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv,
+				   crypto_skcipher_ivsize(skcipher),
+				   (totlen_src -
+				    crypto_skcipher_ivsize(skcipher)));
+	}
+
+	sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
+
+	/*
+	 * Remember actual input length, source buffer length may be
+	 * updated below in case of an in-place operation.
+	 */
+	totlen = totlen_src;
+	queued = totlen_src;
+
+	if (src == dst) {
+		sreq->nr_src = max(sreq->nr_src, sreq->nr_dst);
+		sreq->nr_dst = sreq->nr_src;
+		if (unlikely((totlen_src || totlen_dst) &&
+		    (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
+				max(totlen_src, totlen_dst));
+			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+	} else {
+		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
+				totlen_src);
+			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+
+		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
+			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
+				totlen_dst);
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
+			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+	}
+
+	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
+
+	/* The EIP cannot deal with zero length input packets! */
+	if (totlen == 0)
+		totlen = 1;
+
+	/* command descriptors */
+	for_each_sg(src, sg, sreq->nr_src, i) {
+		int len = sg_dma_len(sg);
+
+		/* Do not overflow the request */
+		if (queued - len < 0)
+			len = queued;
+
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - len),
+					   sg_dma_address(sg), len, totlen,
+					   ctx->base.ctxr_dma);
+		if (IS_ERR(cdesc)) {
+			/* No space left in the command descriptor ring */
+			ret = PTR_ERR(cdesc);
+			goto cdesc_rollback;
+		}
+		n_cdesc++;
+
+		if (n_cdesc == 1)
+			first_cdesc = cdesc;
+
+		queued -= len;
+		if (!queued)
+			break;
+	}
+
+	if (unlikely(!n_cdesc)) {
+		/*
+		 * Special case: zero length input buffer.
+		 * The engine always needs the 1st command descriptor, however!
+		 */
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
+						 ctx->base.ctxr_dma);
+		n_cdesc = 1;
+	}
+
+	/* Add context control words and token to first command descriptor */
+	safexcel_context_control(ctx, base, sreq, first_cdesc);
+	if (ctx->aead)
+		safexcel_aead_token(ctx, iv, first_cdesc,
+				    sreq->direction, cryptlen,
+				    assoclen, digestsize);
+	else
+		safexcel_skcipher_token(ctx, iv, first_cdesc,
+					cryptlen);
+
+	/* result descriptors */
+	for_each_sg(dst, sg, sreq->nr_dst, i) {
+		bool last = (i == sreq->nr_dst - 1);
+		u32 len = sg_dma_len(sg);
+
+		/* only allow the part of the buffer we know we need */
+		if (len > totlen_dst)
+			len = totlen_dst;
+		if (unlikely(!len))
+			break;
+		totlen_dst -= len;
+
+		/* skip over AAD space in buffer - not written */
+		if (assoclen) {
+			if (assoclen >= len) {
+				assoclen -= len;
+				continue;
+			}
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg) +
+						   assoclen,
+						   len - assoclen);
+			assoclen = 0;
+		} else {
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg),
+						   len);
+		}
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		if (first) {
+			first_rdesc = rdesc;
+			first = false;
+		}
+		n_rdesc++;
+	}
+
+	if (unlikely(first)) {
+		/*
+		 * Special case: AEAD decrypt with only AAD data.
+		 * In this case there is NO output data from the engine,
+		 * but the engine still needs a result descriptor!
+		 * Create a dummy one just for catching the result token.
+		 */
+		rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		first_rdesc = rdesc;
+		n_rdesc = 1;
+	}
+
+	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
+
+	*commands = n_cdesc;
+	*results = n_rdesc;
+	return 0;
+
+rdesc_rollback:
+	for (i = 0; i < n_rdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
+cdesc_rollback:
+	for (i = 0; i < n_cdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+	if (src == dst) {
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+	}
+
+	return ret;
+}
+
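+/*
+ * Completion handler for a context invalidation request: either free the
+ * context record (the tfm is exiting) or move the original request to a
+ * newly selected ring and kick its worker.
+ */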
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *base,
+				      struct safexcel_cipher_req *sreq,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_result_desc *rdesc;
+	int ndesc = 0, enq_ret;
+
+	*ret = 0;
+
+	if (unlikely(!sreq->rdescs))
+		return 0;
+
+	while (sreq->rdescs--) {
+		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+		if (IS_ERR(rdesc)) {
+			dev_err(priv->dev,
+				"cipher: invalidate: could not retrieve the result descriptor\n");
+			*ret = PTR_ERR(rdesc);
+			break;
+		}
+
+		if (likely(!*ret))
+			*ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+		ndesc++;
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (ctx->base.exit_inv) {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+
+		*should_complete = true;
+
+		return ndesc;
+	}
+
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	if (enq_ret != -EINPROGRESS)
+		*ret = enq_ret;
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	*should_complete = false;
+
+	return ndesc;
+}
+
+static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
+					   int ring,
+					   struct crypto_async_request *async,
+					   bool *should_complete, int *ret)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async, sreq,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async, req->src,
+						 req->dst, req->cryptlen, sreq,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
+				       int ring,
+				       struct crypto_async_request *async,
+				       bool *should_complete, int *ret)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	int err;
+
+	if (sreq->needs_inv) {
+		sreq->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async, sreq,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async, req->src,
+						 req->dst,
+						 req->cryptlen + crypto_aead_authsize(tfm),
+						 sreq, should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_cipher_send_inv(struct crypto_async_request *base,
+				    int ring, int *commands, int *results)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
+	if (unlikely(ret))
+		return ret;
+
+	*commands = 1;
+	*results = 1;
+
+	return 0;
+}
+
+static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
+				  int *commands, int *results)
+{
+	struct skcipher_request *req = skcipher_request_cast(async);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
+
+	if (sreq->needs_inv) {
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
+	} else {
+		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+		u8 input_iv[AES_BLOCK_SIZE];
+
+		/*
+		 * Save input IV in case of CBC decrypt mode
+		 * Will be overwritten with output IV prior to use!
+		 */
+		memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
+
+		ret = safexcel_send_req(async, ring, sreq, req->src,
+					req->dst, req->cryptlen, 0, 0, input_iv,
+					commands, results);
+	}
+
+	sreq->rdescs = *results;
+	return ret;
+}
+
+static int safexcel_aead_send(struct crypto_async_request *async, int ring,
+			      int *commands, int *results)
+{
+	struct aead_request *req = aead_request_cast(async);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
+
+	if (sreq->needs_inv)
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+					req->cryptlen, req->assoclen,
+					crypto_aead_authsize(tfm), req->iv,
+					commands, results);
+	sreq->rdescs = *results;
+	return ret;
+}
+
+static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
+				    struct crypto_async_request *base,
+				    struct safexcel_cipher_req *sreq,
+				    struct safexcel_inv_result *result)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ring = ctx->base.ring;
+
+	init_completion(&result->completion);
+
+	ctx = crypto_tfm_ctx(base->tfm);
+	ctx->base.exit_inv = true;
+	sreq->needs_inv = true;
+
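+	/*
+	 * Queue a dummy request flagged for context invalidation and wait
+	 * until the engine has processed it, so the context record can be
+	 * freed safely by the result handler.
+	 */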
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	wait_for_completion(&result->completion);
+
+	if (result->error) {
+		dev_warn(priv->dev,
+			"cipher: sync: invalidate: completion error %d\n",
+			 result->error);
+		return result->error;
+	}
+
+	return 0;
+}
+
+static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
+{
+	EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+	struct safexcel_inv_result result = {};
+
+	memset(req, 0, sizeof(struct skcipher_request));
+
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      safexcel_inv_complete, &result);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+
+	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
+static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
+{
+	EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
+	struct safexcel_cipher_req *sreq = aead_request_ctx(req);
+	struct safexcel_inv_result result = {};
+
+	memset(req, 0, sizeof(struct aead_request));
+
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  safexcel_inv_complete, &result);
+	aead_request_set_tfm(req, __crypto_aead_cast(tfm));
+
+	return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+}
+
+static int safexcel_queue_req(struct crypto_async_request *base,
+			struct safexcel_cipher_req *sreq,
+			enum safexcel_cipher_direction dir)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret, ring;
+
+	sreq->needs_inv = false;
+	sreq->direction = dir;
+
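+	/*
+	 * On first use, allocate the per-tfm context record from the DMA pool
+	 * and bind it to a ring. Later requests reuse the record and only
+	 * schedule an invalidation when the record cache (TRC) is in use and
+	 * the key has changed.
+	 */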
+	if (ctx->base.ctxr) {
+		if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
+			sreq->needs_inv = true;
+			ctx->base.needs_inv = false;
+		}
+	} else {
+		ctx->base.ring = safexcel_select_ring(priv);
+		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+						 EIP197_GFP_FLAGS(*base),
+						 &ctx->base.ctxr_dma);
+		if (!ctx->base.ctxr)
+			return -ENOMEM;
+	}
+
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return ret;
+}
+
+static int safexcel_encrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_decrypt(struct skcipher_request *req)
+{
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+			SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(tfm->__crt_alg, struct safexcel_alg_template,
+			     alg.skcipher.base);
+
+	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+				    sizeof(struct safexcel_cipher_req));
+
+	ctx->priv = tmpl->priv;
+
+	ctx->base.send = safexcel_skcipher_send;
+	ctx->base.handle_result = safexcel_skcipher_handle_result;
+	return 0;
+}
+
+static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	memzero_explicit(ctx->key, sizeof(ctx->key));
+
+	/* context not allocated, skip invalidation */
+	if (!ctx->base.ctxr)
+		return -ENOMEM;
+
+	memzero_explicit(ctx->base.ctxr->data, sizeof(ctx->base.ctxr->data));
+	return 0;
+}
+
+static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	if (safexcel_cipher_cra_exit(tfm))
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_skcipher_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "skcipher: invalidation error %d\n",
+				 ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	if (safexcel_cipher_cra_exit(tfm))
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_aead_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "aead: invalidation error %d\n",
+				 ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+static int safexcel_skcipher_aes_ecb_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_AES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_AES,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.base = {
+			.cra_name = "ecb(aes)",
+			.cra_driver_name = "safexcel-ecb-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_aes_ecb_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_aes_cbc_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_AES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_AES,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "safexcel-cbc-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_aes_cbc_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_AES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cfb_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cfb(aes)",
+			.cra_driver_name = "safexcel-cfb-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_aes_cfb_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_AES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ofb_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aes_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ofb(aes)",
+			.cra_driver_name = "safexcel-ofb-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_aes_ofb_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
+					   const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	int ret, i;
+	unsigned int keylen;
+
+	/* last 4 bytes of key are the nonce! */
+	ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
+	/* exclude the nonce here */
+	keylen = len - CTR_RFC3686_NONCE_SIZE;
+	ret = aes_expandkey(&aes, key, keylen);
+	if (ret) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < keylen / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < keylen / sizeof(u32); i++)
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+	ctx->key_len = keylen;
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
+static int safexcel_skcipher_aes_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_AES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ctr_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_AES,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aesctr_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		/* Add nonce size */
+		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+		.ivsize = CTR_RFC3686_IV_SIZE,
+		.base = {
+			.cra_name = "rfc3686(ctr(aes))",
+			.cra_driver_name = "safexcel-ctr-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_aes_ctr_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+			       unsigned int len)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+	int ret;
+
+	ret = verify_skcipher_des_key(ctfm, key);
+	if (ret)
+		return ret;
+
+	/* if the context exists and the key changed, it must be invalidated */
+	if (ctx->base.ctxr_dma)
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+	return 0;
+}
+
+static int safexcel_skcipher_des_cbc_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_DES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_DES,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.ivsize = DES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "safexcel-cbc-des",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_des_cbc_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_des_ecb_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_DES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_DES,
+	.alg.skcipher = {
+		.setkey = safexcel_des_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = DES_KEY_SIZE,
+		.max_keysize = DES_KEY_SIZE,
+		.base = {
+			.cra_name = "ecb(des)",
+			.cra_driver_name = "safexcel-ecb-des",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_des_ecb_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
+				   const u8 *key, unsigned int len)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+	int err;
+
+	err = verify_skcipher_des3_key(ctfm, key);
+	if (err)
+		return err;
+
+	/* if the context exists and the key changed, it must be invalidated */
+	if (ctx->base.ctxr_dma) {
+		if (memcmp(ctx->key, key, len))
+			ctx->base.needs_inv = true;
+	}
+
+	memcpy(ctx->key, key, len);
+
+	ctx->key_len = len;
+
+	return 0;
+}
+
+static int safexcel_skcipher_des3_cbc_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_3DES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_DES,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.base = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "safexcel-cbc-des3_ede",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_des3_cbc_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_des3_ecb_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_3DES;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_DES,
+	.alg.skcipher = {
+		.setkey = safexcel_des3_ede_setkey,
+		.encrypt = safexcel_encrypt,
+		.decrypt = safexcel_decrypt,
+		.min_keysize = DES3_EDE_KEY_SIZE,
+		.max_keysize = DES3_EDE_KEY_SIZE,
+		.base = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "safexcel-ecb-des3_ede",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_des3_ecb_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_encrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_aead_decrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+}
+
+static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(tfm->__crt_alg, struct safexcel_alg_template,
+			     alg.aead.base);
+
+	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+				sizeof(struct safexcel_cipher_req));
+
+	ctx->priv = tmpl->priv;
+
+	ctx->alg  = SAFEXCEL_AES; /* default */
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
+	ctx->aead = true;
+	ctx->base.send = safexcel_aead_send;
+	ctx->base.handle_result = safexcel_aead_handle_result;
+	return 0;
+}
+
+static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	ctx->state_sz = SHA1_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha1_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha256_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	ctx->state_sz = SHA256_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA256_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha256_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha224_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	ctx->state_sz = SHA256_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA224_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha224),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha224_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha512_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA512_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha512),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha512_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha384_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	ctx->state_sz = SHA512_DIGEST_SIZE;
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = SHA384_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha384),cbc(aes))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha384_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_sha1_cra_init(tfm);
+	ctx->alg = SAFEXCEL_3DES; /* override default */
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = DES3_EDE_BLOCK_SIZE,
+		.maxauthsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha1_des3_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_sha1_cra_init(tfm);
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA1,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = CTR_RFC3686_IV_SIZE,
+		.maxauthsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha1_ctr_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha256_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_sha256_cra_init(tfm);
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = CTR_RFC3686_IV_SIZE,
+		.maxauthsize = SHA256_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha256_ctr_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha224_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_sha224_cra_init(tfm);
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_256,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = CTR_RFC3686_IV_SIZE,
+		.maxauthsize = SHA224_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha224_ctr_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha512_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_sha512_cra_init(tfm);
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = CTR_RFC3686_IV_SIZE,
+		.maxauthsize = SHA512_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha512_ctr_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_sha384_ctr_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_sha384_cra_init(tfm);
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+	return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_SHA2_512,
+	.alg.aead = {
+		.setkey = safexcel_aead_setkey,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = CTR_RFC3686_IV_SIZE,
+		.maxauthsize = SHA384_DIGEST_SIZE,
+		.base = {
+			.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+			.cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_sha384_ctr_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
+					   const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	int ret, i;
+	unsigned int keylen;
+
+	/* Check for illegal XTS keys */
+	ret = xts_verify_key(ctfm, key, len);
+	if (ret)
+		return ret;
+
+	/* Only half of the key data is the cipher key */
+	keylen = (len >> 1);
+	ret = aes_expandkey(&aes, key, keylen);
+	if (ret) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < keylen / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < keylen / sizeof(u32); i++)
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+	/* The other half is the tweak key */
+	ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
+	if (ret) {
+		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < keylen / sizeof(u32); i++) {
+			if (ctx->key[i + keylen / sizeof(u32)] !=
+			    cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < keylen / sizeof(u32); i++)
+		ctx->key[i + keylen / sizeof(u32)] =
+			cpu_to_le32(aes.key_enc[i]);
+
+	ctx->key_len = keylen << 1;
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
+static int safexcel_skcipher_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_skcipher_cra_init(tfm);
+	ctx->alg  = SAFEXCEL_AES;
+	ctx->xts  = 1;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
+	return 0;
+}
+
+static int safexcel_encrypt_xts(struct skcipher_request *req)
+{
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+				  SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_decrypt_xts(struct skcipher_request *req)
+{
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+	return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
+				  SAFEXCEL_DECRYPT);
+}
+
+struct safexcel_alg_template safexcel_alg_xts_aes = {
+	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XTS,
+	.alg.skcipher = {
+		.setkey = safexcel_skcipher_aesxts_setkey,
+		.encrypt = safexcel_encrypt_xts,
+		.decrypt = safexcel_decrypt_xts,
+		/* XTS actually uses 2 AES keys glued together */
+		.min_keysize = AES_MIN_KEY_SIZE * 2,
+		.max_keysize = AES_MAX_KEY_SIZE * 2,
+		.ivsize = XTS_BLOCK_SIZE,
+		.base = {
+			.cra_name = "xts(aes)",
+			.cra_driver_name = "safexcel-xts-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = XTS_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_skcipher_aes_xts_cra_init,
+			.cra_exit = safexcel_skcipher_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
+				    unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	u32 hashkey[AES_BLOCK_SIZE >> 2];
+	int ret, i;
+
+	ret = aes_expandkey(&aes, key, len);
+	if (ret) {
+		crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		memzero_explicit(&aes, sizeof(aes));
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < len / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
+	for (i = 0; i < len / sizeof(u32); i++)
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+	ctx->key_len = len;
+
+	/* Compute the hash key by encrypting zeroes with the cipher key */
+	crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
+				CRYPTO_TFM_REQ_MASK);
+	ret = crypto_cipher_setkey(ctx->hkaes, key, len);
+	crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
+			      CRYPTO_TFM_RES_MASK);
+	if (ret)
+		return ret;
+
+	memset(hashkey, 0, AES_BLOCK_SIZE);
+	crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
+			if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
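+	/* Store the GHASH subkey H in the ipad area, in big-endian form */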
+	for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
+		ctx->ipad[i] = cpu_to_be32(hashkey[i]);
+
+	memzero_explicit(hashkey, AES_BLOCK_SIZE);
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
+static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_GHASH;
+	ctx->state_sz = GHASH_BLOCK_SIZE;
+	ctx->xcm = EIP197_XCM_MODE_GCM;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+
+	ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->hkaes))
+		return PTR_ERR(ctx->hkaes);
+
+	return 0;
+}
+
+static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_cipher(ctx->hkaes);
+	safexcel_aead_cra_exit(tfm);
+}
+
+static int safexcel_aead_gcm_setauthsize(struct crypto_aead *tfm,
+					 unsigned int authsize)
+{
+	return crypto_gcm_check_authsize(authsize);
+}
+
+struct safexcel_alg_template safexcel_alg_gcm = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
+	.alg.aead = {
+		.setkey = safexcel_aead_gcm_setkey,
+		.setauthsize = safexcel_aead_gcm_setauthsize,
+		.encrypt = safexcel_aead_encrypt,
+		.decrypt = safexcel_aead_decrypt,
+		.ivsize = GCM_AES_IV_SIZE,
+		.maxauthsize = GHASH_DIGEST_SIZE,
+		.base = {
+			.cra_name = "gcm(aes)",
+			.cra_driver_name = "safexcel-gcm-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_gcm_cra_init,
+			.cra_exit = safexcel_aead_gcm_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
+
+static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
+				    unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct crypto_aes_ctx aes;
+	int ret, i;
+
+	ret = aes_expandkey(&aes, key, len);
+	if (ret) {
+		crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		memzero_explicit(&aes, sizeof(aes));
+		return ret;
+	}
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+		for (i = 0; i < len / sizeof(u32); i++) {
+			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+				ctx->base.needs_inv = true;
+				break;
+			}
+		}
+	}
+
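+	/*
+	 * The AES key is also stored in the ipad area, at an offset of two
+	 * AES blocks, for use as the engine's CBC-MAC (XCBC) key.
+	 */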
+	for (i = 0; i < len / sizeof(u32); i++) {
+		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+		ctx->ipad[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
+			cpu_to_be32(aes.key_enc[i]);
+	}
+
+	ctx->key_len = len;
+	ctx->state_sz = 2 * AES_BLOCK_SIZE + len;
+
+	if (len == AES_KEYSIZE_192)
+		ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
+	else if (len == AES_KEYSIZE_256)
+		ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
+	else
+		ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+
+	memzero_explicit(&aes, sizeof(aes));
+	return 0;
+}
+
+static int safexcel_aead_ccm_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	safexcel_aead_cra_init(tfm);
+	ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
+	ctx->state_sz = 3 * AES_BLOCK_SIZE;
+	ctx->xcm = EIP197_XCM_MODE_CCM;
+	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+	return 0;
+}
+
+static int safexcel_aead_ccm_setauthsize(struct crypto_aead *tfm,
+					 unsigned int authsize)
+{
+	/* Borrowed from crypto/ccm.c */
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int safexcel_ccm_encrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
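+	/* iv[0] holds the CCM L' value (length field size minus one), which
+	 * must be in the 1..7 range.
+	 */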
+	if (req->iv[0] < 1 || req->iv[0] > 7)
+		return -EINVAL;
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
+}
+
+static int safexcel_ccm_decrypt(struct aead_request *req)
+{
+	struct safexcel_cipher_req *creq = aead_request_ctx(req);
+
+	if (req->iv[0] < 1 || req->iv[0] > 7)
+		return -EINVAL;
+
+	return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
+}
+
+struct safexcel_alg_template safexcel_alg_ccm = {
+	.type = SAFEXCEL_ALG_TYPE_AEAD,
+	.algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
+	.alg.aead = {
+		.setkey = safexcel_aead_ccm_setkey,
+		.setauthsize = safexcel_aead_ccm_setauthsize,
+		.encrypt = safexcel_ccm_encrypt,
+		.decrypt = safexcel_ccm_decrypt,
+		.ivsize = AES_BLOCK_SIZE,
+		.maxauthsize = AES_BLOCK_SIZE,
+		.base = {
+			.cra_name = "ccm(aes)",
+			.cra_driver_name = "safexcel-ccm-aes",
+			.cra_priority = SAFEXCEL_CRA_PRIORITY,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_blocksize = 1,
+			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+			.cra_alignmask = 0,
+			.cra_init = safexcel_aead_ccm_cra_init,
+			.cra_exit = safexcel_aead_cra_exit,
+			.cra_module = THIS_MODULE,
+		},
+	},
+};
diff --git a/marvell/linux/drivers/crypto/inside-secure/safexcel_hash.c b/marvell/linux/drivers/crypto/inside-secure/safexcel_hash.c
new file mode 100644
index 0000000..2effb6d
--- /dev/null
+++ b/marvell/linux/drivers/crypto/inside-secure/safexcel_hash.c
@@ -0,0 +1,1742 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "safexcel.h"
+
+struct safexcel_ahash_ctx {
+	struct safexcel_context base;
+	struct safexcel_crypto_priv *priv;
+
+	u32 alg;
+
+	u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+	u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+};
+
+struct safexcel_ahash_req {
+	bool last_req;
+	bool finish;
+	bool hmac;
+	bool needs_inv;
+	bool hmac_zlen;
+	bool len_is_le;
+
+	int nents;
+	dma_addr_t result_dma;
+
+	u32 digest;
+
+	u8 state_sz;    /* expected state size, only set once */
+	u8 block_sz;    /* block size, only set once */
+	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+
+	u64 len;
+	u64 processed;
+
+	u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
+	u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
+};
+
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+	return req->len - req->processed;
+}
+
+static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
+				u32 input_length, u32 result_length)
+{
+	struct safexcel_token *token =
+		(struct safexcel_token *)cdesc->control_data.token;
+
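+	/*
+	 * Two-instruction token: hash 'input_length' bytes of input, then
+	 * insert the 'result_length' byte digest into the result packet.
+	 */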
+	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+	token[0].packet_length = input_length;
+	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+	token[1].packet_length = result_length;
+	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+			EIP197_TOKEN_STAT_LAST_PACKET;
+	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+}
+
+static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+				     struct safexcel_ahash_req *req,
+				     struct safexcel_command_desc *cdesc)
+{
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	u64 count = 0;
+
+	cdesc->control_data.control0 |= ctx->alg;
+
+	/*
+	 * Copy the input digest if needed, and set up the context
+	 * fields. Do this now as we need it to set up the first command
+	 * descriptor.
+	 */
+	if (!req->processed) {
+		/* First - and possibly only - block of basic hash only */
+		if (req->finish) {
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_TYPE_HASH_OUT |
+				CONTEXT_CONTROL_RESTART_HASH  |
+				/* ensure it's not 0! */
+				CONTEXT_CONTROL_SIZE(1);
+		} else {
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_TYPE_HASH_OUT  |
+				CONTEXT_CONTROL_RESTART_HASH   |
+				CONTEXT_CONTROL_NO_FINISH_HASH |
+				/* ensure it's not 0! */
+				CONTEXT_CONTROL_SIZE(1);
+		}
+		return;
+	}
+
+	/* Hash continuation or HMAC, setup (inner) digest from state */
+	memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
+
+	if (req->finish) {
+		/* Compute digest count for hash/HMAC finish operations */
+		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
+		    req->hmac_zlen || (req->processed != req->block_sz)) {
+			count = req->processed / EIP197_COUNTER_BLOCK_SIZE;
+
+			/* This is a hardware limitation: the block
+			 * counter must fit into a u32. That represents
+			 * a fairly large amount of input data, so we
+			 * should never see this in practice.
+			 */
+			if (unlikely(count & 0xffffffff00000000ULL)) {
+				dev_warn(priv->dev,
+					 "Input data is too big\n");
+				return;
+			}
+		}
+
+		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
+		    /* Special case: zero length HMAC */
+		    req->hmac_zlen ||
+		    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
+		    (req->processed != req->block_sz)) {
+			/* Basic hash continue operation, need digest + cnt */
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
+				CONTEXT_CONTROL_TYPE_HASH_OUT |
+				CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+			/* For zero-len HMAC, don't finalize, already padded! */
+			if (req->hmac_zlen)
+				cdesc->control_data.control0 |=
+					CONTEXT_CONTROL_NO_FINISH_HASH;
+			cdesc->control_data.control1 |=
+				CONTEXT_CONTROL_DIGEST_CNT;
+			ctx->base.ctxr->data[req->state_sz >> 2] =
+				cpu_to_le32(count);
+			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+
+			/* Clear zero-length HMAC flag for next operation! */
+			req->hmac_zlen = false;
+		} else { /* HMAC */
+			/* Need outer digest for HMAC finalization */
+			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
+			       ctx->opad, req->state_sz);
+
+			/* Single pass HMAC - no digest count */
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
+				CONTEXT_CONTROL_TYPE_HASH_OUT |
+				CONTEXT_CONTROL_DIGEST_HMAC;
+		}
+	} else { /* Hash continuation, do not finish yet */
+		cdesc->control_data.control0 |=
+			CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
+			CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
+			CONTEXT_CONTROL_TYPE_HASH_OUT |
+			CONTEXT_CONTROL_NO_FINISH_HASH;
+	}
+}
+
+static int safexcel_ahash_enqueue(struct ahash_request *areq);
+
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
+	u64 cache_len;
+
+	*ret = 0;
+
+	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+	if (IS_ERR(rdesc)) {
+		dev_err(priv->dev,
+			"hash: result: could not retrieve the result descriptor\n");
+		*ret = PTR_ERR(rdesc);
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (sreq->nents) {
+		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+		sreq->nents = 0;
+	}
+
+	if (sreq->result_dma) {
+		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
+				 DMA_FROM_DEVICE);
+		sreq->result_dma = 0;
+	}
+
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+		sreq->cache_sz = 0;
+	}
+
+	if (sreq->finish) {
+		if (sreq->hmac &&
+		    (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
+			/* Faking HMAC using hash - need to do outer hash */
+			memcpy(sreq->cache, sreq->state,
+			       crypto_ahash_digestsize(ahash));
+
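+			/*
+			 * Restart from the outer (opad) state: len/processed
+			 * are set so the opad block counts as already hashed
+			 * and only the inner digest is fed in as new data.
+			 */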
+			memcpy(sreq->state, ctx->opad, sreq->state_sz);
+
+			sreq->len = sreq->block_sz +
+				    crypto_ahash_digestsize(ahash);
+			sreq->processed = sreq->block_sz;
+			sreq->hmac = 0;
+
+			ctx->base.needs_inv = true;
+			areq->nbytes = 0;
+			safexcel_ahash_enqueue(areq);
+
+			*should_complete = false; /* Not done yet */
+			return 1;
+		}
+
+		memcpy(areq->result, sreq->state,
+		       crypto_ahash_digestsize(ahash));
+	}
+
+	cache_len = safexcel_queued_len(sreq);
+	if (cache_len)
+		memcpy(sreq->cache, sreq->cache_next, cache_len);
+
+	*should_complete = true;
+
+	return 1;
+}
+
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+				   int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
+	struct safexcel_result_desc *rdesc;
+	struct scatterlist *sg;
+	int i, extra = 0, n_cdesc = 0, ret = 0;
+	u64 queued, len, cache_len;
+
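+	/*
+	 * queued = data accepted by update() but not yet sent to the engine;
+	 * cache_len = the part of it already held in the cache buffer, the
+	 * rest still lives in the current request's scatterlist.
+	 */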
+	queued = len = safexcel_queued_len(req);
+	if (queued <= HASH_CACHE_SIZE)
+		cache_len = queued;
+	else
+		cache_len = queued - areq->nbytes;
+
+	if (!req->finish && !req->last_req) {
+		/* If this is not the last request and the queued data does not
+		 * fit into full cache blocks, cache it for the next send call.
+		 */
+		extra = queued & (HASH_CACHE_SIZE - 1);
+
+		/* If this is not the last request and the queued data
+		 * is a multiple of a block, cache the last one for now.
+		 */
+		if (!extra)
+			extra = HASH_CACHE_SIZE;
+
+		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+				   req->cache_next, extra,
+				   areq->nbytes - extra);
+
+		queued -= extra;
+		len -= extra;
+
+		if (!queued) {
+			*commands = 0;
+			*results = 0;
+			return 0;
+		}
+	}
+
+	/* Add a command descriptor for the cached data, if any */
+	if (cache_len) {
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
+						cache_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
+
+		req->cache_sz = cache_len;
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
+						 (cache_len == len),
+						 req->cache_dma, cache_len, len,
+						 ctx->base.ctxr_dma);
+		if (IS_ERR(first_cdesc)) {
+			ret = PTR_ERR(first_cdesc);
+			goto unmap_cache;
+		}
+		n_cdesc++;
+
+		queued -= cache_len;
+		if (!queued)
+			goto send_command;
+	}
+
+	/* Skip descriptor generation for zero-length requests */
+	if (!areq->nbytes)
+		goto send_command;
+
+	/* Now handle the current ahash request buffer(s) */
+	req->nents = dma_map_sg(priv->dev, areq->src,
+				sg_nents_for_len(areq->src,
+						 areq->nbytes),
+				DMA_TO_DEVICE);
+	if (!req->nents) {
+		ret = -ENOMEM;
+		goto cdesc_rollback;
+	}
+
+	for_each_sg(areq->src, sg, req->nents, i) {
+		int sglen = sg_dma_len(sg);
+
+		/* Do not overflow the request */
+		if (queued < sglen)
+			sglen = queued;
+
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - sglen),
+					   sg_dma_address(sg),
+					   sglen, len, ctx->base.ctxr_dma);
+		if (IS_ERR(cdesc)) {
+			ret = PTR_ERR(cdesc);
+			goto unmap_sg;
+		}
+		n_cdesc++;
+
+		if (n_cdesc == 1)
+			first_cdesc = cdesc;
+
+		queued -= sglen;
+		if (!queued)
+			break;
+	}
+
+send_command:
+	/* Setup the context options */
+	safexcel_context_control(ctx, req, first_cdesc);
+
+	/* Add the token */
+	safexcel_hash_token(first_cdesc, len, req->state_sz);
+
+	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, req->result_dma)) {
+		ret = -EINVAL;
+		goto unmap_sg;
+	}
+
+	/* Add a result descriptor */
+	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
+				   req->state_sz);
+	if (IS_ERR(rdesc)) {
+		ret = PTR_ERR(rdesc);
+		goto unmap_result;
+	}
+
+	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
+
+	req->processed += len;
+
+	*commands = n_cdesc;
+	*results = 1;
+	return 0;
+
+unmap_result:
+	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
+			 DMA_FROM_DEVICE);
+unmap_sg:
+	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+cdesc_rollback:
+	for (i = 0; i < n_cdesc; i++)
+		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+unmap_cache:
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_dma = 0;
+		req->cache_sz = 0;
+	}
+
+	return ret;
+}
+
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+				      int ring,
+				      struct crypto_async_request *async,
+				      bool *should_complete, int *ret)
+{
+	struct safexcel_result_desc *rdesc;
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int enq_ret;
+
+	*ret = 0;
+
+	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+	if (IS_ERR(rdesc)) {
+		dev_err(priv->dev,
+			"hash: invalidate: could not retrieve the result descriptor\n");
+		*ret = PTR_ERR(rdesc);
+	} else {
+		*ret = safexcel_rdesc_check_errors(priv, rdesc);
+	}
+
+	safexcel_complete(priv, ring);
+
+	if (ctx->base.exit_inv) {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+
+		*should_complete = true;
+		return 1;
+	}
+
+	ring = safexcel_select_ring(priv);
+	ctx->base.ring = ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	if (enq_ret != -EINPROGRESS)
+		*ret = enq_ret;
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	*should_complete = false;
+
+	return 1;
+}
+
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+				  struct crypto_async_request *async,
+				  bool *should_complete, int *ret)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int err;
+
+	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
+
+	if (req->needs_inv) {
+		req->needs_inv = false;
+		err = safexcel_handle_inv_result(priv, ring, async,
+						 should_complete, ret);
+	} else {
+		err = safexcel_handle_req_result(priv, ring, async,
+						 should_complete, ret);
+	}
+
+	return err;
+}
+
+static int safexcel_ahash_send_inv(struct crypto_async_request *async,
+				   int ring, int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	int ret;
+
+	ret = safexcel_invalidate_cache(async, ctx->priv,
+					ctx->base.ctxr_dma, ring);
+	if (unlikely(ret))
+		return ret;
+
+	*commands = 1;
+	*results = 1;
+
+	return 0;
+}
+
+static int safexcel_ahash_send(struct crypto_async_request *async,
+			       int ring, int *commands, int *results)
+{
+	struct ahash_request *areq = ahash_request_cast(async);
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	if (req->needs_inv)
+		ret = safexcel_ahash_send_inv(async, ring, commands, results);
+	else
+		ret = safexcel_ahash_send_req(async, ring, commands, results);
+
+	return ret;
+}
+
+static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
+	struct safexcel_inv_result result = {};
+	int ring = ctx->base.ring;
+
+	memset(req, 0, EIP197_AHASH_REQ_SIZE);
+
+	/* create invalidation request */
+	init_completion(&result.completion);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_inv_complete, &result);
+
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	ctx->base.exit_inv = true;
+	rctx->needs_inv = true;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	wait_for_completion(&result.completion);
+
+	if (result.error) {
+		dev_warn(priv->dev, "hash: completion error (%d)\n",
+			 result.error);
+		return result.error;
+	}
+
+	return 0;
+}
+
+/*
+ * safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block size worth of data in
+ * the pipe.
+ */
+static int safexcel_ahash_cache(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	u64 cache_len;
+
+	/*
+	 * cache_len: everything accepted by the driver but not sent yet, i.e.
+	 * total size handled by update() - size of the current request -
+	 * total size handled by send()
+	 */
+	cache_len = safexcel_queued_len(req);
+
+	/*
+	 * In case there aren't enough bytes to proceed (less than a
+	 * block size), cache the data until we have enough.
+	 */
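+	/*
+	 * Example: with an empty cache, a request of up to HASH_CACHE_SIZE
+	 * bytes is simply copied into req->cache and 0 is returned; anything
+	 * larger returns -E2BIG so the caller sends it to the engine instead.
+	 */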
+	if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
+		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+				   req->cache + cache_len,
+				   areq->nbytes, 0);
+		return 0;
+	}
+
+	/* We couldn't cache all the data */
+	return -E2BIG;
+}
+
+static int safexcel_ahash_enqueue(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret, ring;
+
+	req->needs_inv = false;
+
+	if (ctx->base.ctxr) {
+		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+		    req->processed &&
+		    (/* invalidate for basic hash continuation finish */
+		     (req->finish &&
+		      (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
+		     /* invalidate if (i)digest changed */
+		     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
+		     /* invalidate for HMAC continuation finish */
+		     (req->finish && (req->processed != req->block_sz)) ||
+		     /* invalidate for HMAC finish with odigest changed */
+		     (req->finish &&
+		      memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
+			     ctx->opad, req->state_sz))))
+			/*
+			 * We're still setting needs_inv here, even though it is
+			 * cleared right away, because the needs_inv flag can be
+			 * set in other functions and we want to keep the same
+			 * logic.
+			 */
+			ctx->base.needs_inv = true;
+
+		if (ctx->base.needs_inv) {
+			ctx->base.needs_inv = false;
+			req->needs_inv = true;
+		}
+	} else {
+		ctx->base.ring = safexcel_select_ring(priv);
+		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+						 EIP197_GFP_FLAGS(areq->base),
+						 &ctx->base.ctxr_dma);
+		if (!ctx->base.ctxr)
+			return -ENOMEM;
+	}
+
+	ring = ctx->base.ring;
+
+	spin_lock_bh(&priv->ring[ring].queue_lock);
+	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
+	spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+	queue_work(priv->ring[ring].workqueue,
+		   &priv->ring[ring].work_data.work);
+
+	return ret;
+}
+
+static int safexcel_ahash_update(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	int ret;
+
+	/* If the request is 0 length, do nothing */
+	if (!areq->nbytes)
+		return 0;
+
+	/* Add request to the cache if it fits */
+	ret = safexcel_ahash_cache(areq);
+
+	/* Update total request length */
+	req->len += areq->nbytes;
+
+	/* If not all data could fit into the cache, go process the excess.
+	 * Also go process immediately for an HMAC IV precompute, which
+	 * will never be finished at all, but needs to be processed anyway.
+	 */
+	if ((ret && !req->finish) || req->last_req)
+		return safexcel_ahash_enqueue(areq);
+
+	return 0;
+}
+
+static int safexcel_ahash_final(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+
+	req->finish = true;
+
+	if (unlikely(!req->len && !areq->nbytes)) {
+		/*
+		 * If we have an overall 0 length *hash* request:
+		 * The HW cannot hash a zero-length message, so return the
+		 * well-known constant digest directly here.
+		 */
+		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
+			memcpy(areq->result, md5_zero_message_hash,
+			       MD5_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+			memcpy(areq->result, sha1_zero_message_hash,
+			       SHA1_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
+			memcpy(areq->result, sha224_zero_message_hash,
+			       SHA224_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+			memcpy(areq->result, sha256_zero_message_hash,
+			       SHA256_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
+			memcpy(areq->result, sha384_zero_message_hash,
+			       SHA384_DIGEST_SIZE);
+		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+			memcpy(areq->result, sha512_zero_message_hash,
+			       SHA512_DIGEST_SIZE);
+
+		return 0;
+	} else if (unlikely(req->hmac &&
+			    (req->len == req->block_sz) &&
+			    !areq->nbytes)) {
+		/*
+		 * If we have an overall 0 length *HMAC* request:
+		 * For HMAC, we need to finalize the inner digest
+		 * and then perform the outer hash.
+		 */
+
+		/* generate pad block in the cache */
+		/* start with a hash block of all zeroes */
+		memset(req->cache, 0, req->block_sz);
+		/* set the first byte to 0x80 to 'append a 1 bit' */
+		req->cache[0] = 0x80;
+		/* add the length in bits (only 2 bytes are nonzero) */
+		if (req->len_is_le) {
+			/* Little endian length word (e.g. MD5) */
+			req->cache[req->block_sz-8] = (req->block_sz << 3) &
+						      255;
+			req->cache[req->block_sz-7] = (req->block_sz >> 5);
+		} else {
+			/* Big endian length word (e.g. any SHA) */
+			req->cache[req->block_sz-2] = (req->block_sz >> 5);
+			req->cache[req->block_sz-1] = (req->block_sz << 3) &
+						      255;
+		}
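+		/*
+		 * e.g. for a 64 byte block: the pad is 0x80 followed by
+		 * zeroes, with the bit length 64 * 8 = 512 (0x0200) encoded
+		 * in the two length bytes written above.
+		 */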
+
+		req->len += req->block_sz; /* plus 1 hash block */
+
+		/* Set special zero-length HMAC flag */
+		req->hmac_zlen = true;
+
+		/* Finalize HMAC */
+		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	} else if (req->hmac) {
+		/* Finalize HMAC */
+		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	}
+
+	return safexcel_ahash_enqueue(areq);
+}
+
+static int safexcel_ahash_finup(struct ahash_request *areq)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	req->finish = true;
+
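+	/*
+	 * With req->finish set, the update below only caches the data and
+	 * updates the length bookkeeping; the actual submission to the
+	 * engine happens in safexcel_ahash_final().
+	 */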
+	safexcel_ahash_update(areq);
+	return safexcel_ahash_final(areq);
+}
+
+static int safexcel_ahash_export(struct ahash_request *areq, void *out)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	struct safexcel_ahash_export_state *export = out;
+
+	export->len = req->len;
+	export->processed = req->processed;
+
+	export->digest = req->digest;
+
+	memcpy(export->state, req->state, req->state_sz);
+	memcpy(export->cache, req->cache, HASH_CACHE_SIZE);
+
+	return 0;
+}
+
+static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+	const struct safexcel_ahash_export_state *export = in;
+	int ret;
+
+	ret = crypto_ahash_init(areq);
+	if (ret)
+		return ret;
+
+	req->len = export->len;
+	req->processed = export->processed;
+
+	req->digest = export->digest;
+
+	memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
+	memcpy(req->state, export->state, req->state_sz);
+
+	return 0;
+}
+
+static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_alg_template *tmpl =
+		container_of(__crypto_ahash_alg(tfm->__crt_alg),
+			     struct safexcel_alg_template, alg.ahash);
+
+	ctx->priv = tmpl->priv;
+	ctx->base.send = safexcel_ahash_send;
+	ctx->base.handle_result = safexcel_handle_result;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct safexcel_ahash_req));
+	return 0;
+}
+
+static int safexcel_sha1_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA1_DIGEST_SIZE;
+	req->block_sz = SHA1_BLOCK_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha1_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha1_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	int ret;
+
+	/* context not allocated, skip invalidation */
+	if (!ctx->base.ctxr)
+		return;
+
+	if (priv->flags & EIP197_TRC_CACHE) {
+		ret = safexcel_ahash_exit_inv(tfm);
+		if (ret)
+			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+	} else {
+		dma_pool_free(priv->context_pool, ctx->base.ctxr,
+			      ctx->base.ctxr_dma);
+	}
+}
+
+struct safexcel_alg_template safexcel_alg_sha1 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA1,
+	.alg.ahash = {
+		.init = safexcel_sha1_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha1_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "safexcel-sha1",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha1_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len	= SHA1_BLOCK_SIZE;
+	req->processed	= SHA1_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA1_DIGEST_SIZE;
+	req->block_sz = SHA1_BLOCK_SIZE;
+	req->hmac = true;
+
+	return 0;
+}
+
+static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha1_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
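+/*
+ * Minimal completion wrapper used to run an ahash synchronously while
+ * precomputing the HMAC ipad/opad digests below.
+ */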
+struct safexcel_ahash_result {
+	struct completion completion;
+	int error;
+};
+
+static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
+{
+	struct safexcel_ahash_result *result = req->data;
+
+	if (error == -EINPROGRESS)
+		return;
+
+	result->error = error;
+	complete(&result->completion);
+}
+
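+/*
+ * Standard HMAC key preprocessing (RFC 2104): a key longer than the block
+ * size is first hashed down to the digest size, then the zero-padded key is
+ * XORed with HMAC_IPAD_VALUE/HMAC_OPAD_VALUE to form the ipad/opad blocks.
+ */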
+static int safexcel_hmac_init_pad(struct ahash_request *areq,
+				  unsigned int blocksize, const u8 *key,
+				  unsigned int keylen, u8 *ipad, u8 *opad)
+{
+	struct safexcel_ahash_result result;
+	struct scatterlist sg;
+	int ret, i;
+	u8 *keydup;
+
+	if (keylen <= blocksize) {
+		memcpy(ipad, key, keylen);
+	} else {
+		keydup = kmemdup(key, keylen, GFP_KERNEL);
+		if (!keydup)
+			return -ENOMEM;
+
+		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					   safexcel_ahash_complete, &result);
+		sg_init_one(&sg, keydup, keylen);
+		ahash_request_set_crypt(areq, &sg, ipad, keylen);
+		init_completion(&result.completion);
+
+		ret = crypto_ahash_digest(areq);
+		if (ret == -EINPROGRESS || ret == -EBUSY) {
+			wait_for_completion_interruptible(&result.completion);
+			ret = result.error;
+		}
+
+		/* Avoid leaking key material */
+		memzero_explicit(keydup, keylen);
+		kfree(keydup);
+
+		if (ret)
+			return ret;
+
+		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+	}
+
+	memset(ipad + keylen, 0, blocksize - keylen);
+	memcpy(opad, ipad, blocksize);
+
+	for (i = 0; i < blocksize; i++) {
+		ipad[i] ^= HMAC_IPAD_VALUE;
+		opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	return 0;
+}
+
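+/*
+ * Hash a single ipad/opad block and export the resulting intermediate state,
+ * which becomes the precomputed inner/outer digest loaded into the context
+ * record. req->last_req makes the engine process the block even though this
+ * hash is never finalized.
+ */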
+static int safexcel_hmac_init_iv(struct ahash_request *areq,
+				 unsigned int blocksize, u8 *pad, void *state)
+{
+	struct safexcel_ahash_result result;
+	struct safexcel_ahash_req *req;
+	struct scatterlist sg;
+	int ret;
+
+	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_ahash_complete, &result);
+	sg_init_one(&sg, pad, blocksize);
+	ahash_request_set_crypt(areq, &sg, pad, blocksize);
+	init_completion(&result.completion);
+
+	ret = crypto_ahash_init(areq);
+	if (ret)
+		return ret;
+
+	req = ahash_request_ctx(areq);
+	req->hmac = true;
+	req->last_req = true;
+
+	ret = crypto_ahash_update(areq);
+	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+		return ret;
+
+	wait_for_completion_interruptible(&result.completion);
+	if (result.error)
+		return result.error;
+
+	return crypto_ahash_export(areq, state);
+}
+
+int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
+			 void *istate, void *ostate)
+{
+	struct ahash_request *areq;
+	struct crypto_ahash *tfm;
+	unsigned int blocksize;
+	u8 *ipad, *opad;
+	int ret;
+
+	tfm = crypto_alloc_ahash(alg, 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	areq = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!areq) {
+		ret = -ENOMEM;
+		goto free_ahash;
+	}
+
+	crypto_ahash_clear_flags(tfm, ~0);
+	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	ipad = kcalloc(2, blocksize, GFP_KERNEL);
+	if (!ipad) {
+		ret = -ENOMEM;
+		goto free_request;
+	}
+
+	opad = ipad + blocksize;
+
+	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
+	if (ret)
+		goto free_ipad;
+
+	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
+	if (ret)
+		goto free_ipad;
+
+	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
+
+free_ipad:
+	kfree(ipad);
+free_request:
+	ahash_request_free(areq);
+free_ahash:
+	crypto_free_ahash(tfm);
+
+	return ret;
+}
+
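+/*
+ * Compute the inner/outer precomputes for the new key and store them in the
+ * tfm context; if a context record already exists and the digests changed,
+ * flag it for invalidation so the engine does not keep using a stale cached
+ * copy.
+ */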
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+				    unsigned int keylen, const char *alg,
+				    unsigned int state_sz)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	struct safexcel_crypto_priv *priv = ctx->priv;
+	struct safexcel_ahash_export_state istate, ostate;
+	int ret;
+
+	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
+	if (ret)
+		return ret;
+
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
+	    (memcmp(ctx->ipad, istate.state, state_sz) ||
+	     memcmp(ctx->opad, ostate.state, state_sz)))
+		ctx->base.needs_inv = true;
+
+	memcpy(ctx->ipad, &istate.state, state_sz);
+	memcpy(ctx->opad, &ostate.state, state_sz);
+
+	return 0;
+}
+
+static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
+					SHA1_DIGEST_SIZE);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA1,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha1_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha1_digest,
+		.setkey = safexcel_hmac_sha1_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "safexcel-hmac-sha1",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha256_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha256_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha256_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha256 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_256,
+	.alg.ahash = {
+		.init = safexcel_sha256_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha256_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "safexcel-sha256",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha224_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha224_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha224_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha224 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_256,
+	.alg.ahash = {
+		.init = safexcel_sha224_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha224_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "safexcel-sha224",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
+					SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha224_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len	= SHA256_BLOCK_SIZE;
+	req->processed	= SHA256_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
+	req->hmac = true;
+
+	return 0;
+}
+
+static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha224_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_256,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha224_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha224_digest,
+		.setkey = safexcel_hmac_sha224_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA224_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "safexcel-hmac-sha224",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
+					SHA256_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha256_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len	= SHA256_BLOCK_SIZE;
+	req->processed	= SHA256_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
+	req->hmac = true;
+
+	return 0;
+}
+
+static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha256_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_256,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha256_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha256_digest,
+		.setkey = safexcel_hmac_sha256_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "safexcel-hmac-sha256",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_512,
+	.alg.ahash = {
+		.init = safexcel_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha512_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "safexcel-sha512",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
+
+	return 0;
+}
+
+static int safexcel_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_512,
+	.alg.ahash = {
+		.init = safexcel_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_sha384_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "safexcel-sha384",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha512_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len	= SHA512_BLOCK_SIZE;
+	req->processed	= SHA512_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
+	req->hmac = true;
+
+	return 0;
+}
+
+static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha512_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_512,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha512_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha512_digest,
+		.setkey = safexcel_hmac_sha512_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA512_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "safexcel-hmac-sha512",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
+				       unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
+					SHA512_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_sha384_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len	= SHA512_BLOCK_SIZE;
+	req->processed	= SHA512_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
+	req->hmac = true;
+
+	return 0;
+}
+
+static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_sha384_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_SHA2_512,
+	.alg.ahash = {
+		.init = safexcel_hmac_sha384_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_sha384_digest,
+		.setkey = safexcel_hmac_sha384_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = SHA384_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "safexcel-hmac-sha384",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = MD5_DIGEST_SIZE;
+	req->block_sz = MD5_HMAC_BLOCK_SIZE;
+
+	return 0;
+}
+
+static int safexcel_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_MD5,
+	.alg.ahash = {
+		.init = safexcel_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_md5_digest,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "safexcel-md5",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
+
+static int safexcel_hmac_md5_init(struct ahash_request *areq)
+{
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len	= MD5_HMAC_BLOCK_SIZE;
+	req->processed	= MD5_HMAC_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = MD5_DIGEST_SIZE;
+	req->block_sz = MD5_HMAC_BLOCK_SIZE;
+	req->len_is_le = true; /* MD5 is little endian! ... */
+	req->hmac = true;
+
+	return 0;
+}
+
+static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
+				     unsigned int keylen)
+{
+	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
+					MD5_DIGEST_SIZE);
+}
+
+static int safexcel_hmac_md5_digest(struct ahash_request *areq)
+{
+	int ret = safexcel_hmac_md5_init(areq);
+
+	if (ret)
+		return ret;
+
+	return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_md5 = {
+	.type = SAFEXCEL_ALG_TYPE_AHASH,
+	.algo_mask = SAFEXCEL_ALG_MD5,
+	.alg.ahash = {
+		.init = safexcel_hmac_md5_init,
+		.update = safexcel_ahash_update,
+		.final = safexcel_ahash_final,
+		.finup = safexcel_ahash_finup,
+		.digest = safexcel_hmac_md5_digest,
+		.setkey = safexcel_hmac_md5_setkey,
+		.export = safexcel_ahash_export,
+		.import = safexcel_ahash_import,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.statesize = sizeof(struct safexcel_ahash_export_state),
+			.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "safexcel-hmac-md5",
+				.cra_priority = SAFEXCEL_CRA_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+				.cra_init = safexcel_ahash_cra_init,
+				.cra_exit = safexcel_ahash_cra_exit,
+				.cra_module = THIS_MODULE,
+			},
+		},
+	},
+};
diff --git a/marvell/linux/drivers/crypto/inside-secure/safexcel_ring.c b/marvell/linux/drivers/crypto/inside-secure/safexcel_ring.c
new file mode 100644
index 0000000..0f269b8
--- /dev/null
+++ b/marvell/linux/drivers/crypto/inside-secure/safexcel_ring.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+
+#include "safexcel.h"
+
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+				   struct safexcel_desc_ring *cdr,
+				   struct safexcel_desc_ring *rdr)
+{
+	cdr->offset = sizeof(u32) * priv->config.cd_offset;
+	cdr->base = dmam_alloc_coherent(priv->dev,
+					cdr->offset * EIP197_DEFAULT_RING_SIZE,
+					&cdr->base_dma, GFP_KERNEL);
+	if (!cdr->base)
+		return -ENOMEM;
+	cdr->write = cdr->base;
+	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+	cdr->read = cdr->base;
+
+	rdr->offset = sizeof(u32) * priv->config.rd_offset;
+	rdr->base = dmam_alloc_coherent(priv->dev,
+					rdr->offset * EIP197_DEFAULT_RING_SIZE,
+					&rdr->base_dma, GFP_KERNEL);
+	if (!rdr->base)
+		return -ENOMEM;
+	rdr->write = rdr->base;
+	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+	rdr->read = rdr->base;
+
+	return 0;
+}
+
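+/* Simple round-robin ring selection to spread requests over all rings */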
+inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
+{
+	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
+}
+
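+/*
+ * Claim the next free descriptor slot; the ring counts as full when the
+ * write pointer sits one slot behind the read pointer (including the
+ * wrap-around case).
+ */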
+static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
+				     struct safexcel_desc_ring *ring)
+{
+	void *ptr = ring->write;
+
+	if ((ring->write == ring->read - ring->offset) ||
+	    (ring->read == ring->base && ring->write == ring->base_end))
+		return ERR_PTR(-ENOMEM);
+
+	if (ring->write == ring->base_end)
+		ring->write = ring->base;
+	else
+		ring->write += ring->offset;
+
+	return ptr;
+}
+
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+			      struct safexcel_desc_ring *ring)
+{
+	void *ptr = ring->read;
+
+	if (ring->write == ring->read)
+		return ERR_PTR(-ENOENT);
+
+	if (ring->read == ring->base_end)
+		ring->read = ring->base;
+	else
+		ring->read += ring->offset;
+
+	return ptr;
+}
+
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+				     int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+					 int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+					 int ring,
+					 struct safexcel_result_desc *rdesc)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
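+/*
+ * Undo the last safexcel_ring_next_wptr() claim, e.g. when building a
+ * request fails after some descriptors have already been allocated.
+ */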
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+				 struct safexcel_desc_ring *ring)
+{
+	if (ring->write == ring->read)
+		return;
+
+	if (ring->write == ring->base)
+		ring->write = ring->base_end;
+	else
+		ring->write -= ring->offset;
+}
+
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+						 int ring_id,
+						 bool first, bool last,
+						 dma_addr_t data, u32 data_len,
+						 u32 full_data_len,
+						 dma_addr_t context)
+{
+	struct safexcel_command_desc *cdesc;
+	int i;
+
+	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+	if (IS_ERR(cdesc))
+		return cdesc;
+
+	memset(cdesc, 0, sizeof(struct safexcel_command_desc));
+
+	cdesc->first_seg = first;
+	cdesc->last_seg = last;
+	cdesc->particle_size = data_len;
+	cdesc->data_lo = lower_32_bits(data);
+	cdesc->data_hi = upper_32_bits(data);
+
+	if (first && context) {
+		struct safexcel_token *token =
+			(struct safexcel_token *)cdesc->control_data.token;
+
+		/*
+		 * Note that the length here MUST be >0 or else the EIP(1)97
+		 * may hang. Newer EIP197 firmware actually incorporates this
+		 * fix already, but that doesn't help the EIP97 and we may
+		 * also be running older firmware.
+		 */
+		cdesc->control_data.packet_length = full_data_len ?: 1;
+		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
+					      EIP197_OPTION_64BIT_CTX |
+					      EIP197_OPTION_CTX_CTRL_IN_CMD;
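+		/*
+		 * Only bits 31:2 of the low address word are programmed
+		 * (shifted right by 2), so the context record must be at
+		 * least 4-byte aligned.
+		 */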
+		cdesc->control_data.context_lo =
+			(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+		cdesc->control_data.context_hi = upper_32_bits(context);
+
+		if (priv->version == EIP197B_MRVL ||
+		    priv->version == EIP197D_MRVL)
+			cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
+
+		/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
+		cdesc->control_data.refresh = 2;
+
+		for (i = 0; i < EIP197_MAX_TOKENS; i++)
+			eip197_noop_token(&token[i]);
+	}
+
+	return cdesc;
+}
+
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+						int ring_id,
+						bool first, bool last,
+						dma_addr_t data, u32 len)
+{
+	struct safexcel_result_desc *rdesc;
+
+	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+	if (IS_ERR(rdesc))
+		return rdesc;
+
+	memset(rdesc, 0, sizeof(struct safexcel_result_desc));
+
+	rdesc->first_seg = first;
+	rdesc->last_seg = last;
+	rdesc->particle_size = len;
+	rdesc->data_lo = lower_32_bits(data);
+	rdesc->data_hi = upper_32_bits(data);
+
+	return rdesc;
+}