[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/Kconfig b/src/kernel/linux/v4.14/drivers/crypto/qat/Kconfig
new file mode 100644
index 0000000..ce3cae4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/Kconfig
@@ -0,0 +1,82 @@
+config CRYPTO_DEV_QAT
+	tristate
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_AKCIPHER
+	select CRYPTO_DH
+	select CRYPTO_HMAC
+	select CRYPTO_RSA
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select FW_LOADER
+
+config CRYPTO_DEV_QAT_DH895xCC
+	tristate "Support for Intel(R) DH895xCC"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_C3XXX
+	tristate "Support for Intel(R) C3XXX"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c3xxx.
+
+config CRYPTO_DEV_QAT_C62X
+	tristate "Support for Intel(R) C62X"
+	depends on X86 && PCI
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+	  for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c62x.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+	tristate "Support for Intel(R) DH895xCC Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xccvf.
+
+config CRYPTO_DEV_QAT_C3XXXVF
+	tristate "Support for Intel(R) C3XXX Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c3xxxvf.
+
+config CRYPTO_DEV_QAT_C62XVF
+	tristate "Support for Intel(R) C62X Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+	help
+	  Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_c62xvf.
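
A note on the Kconfig entries above: every device option selects FW_LOADER because each device needs engine firmware at init time (the image names are the ADF_*_FW / ADF_*_MMP defines in the per-device headers later in this patch). As a rough, hypothetical sketch of what that selection buys the driver — the real loading is done by common code in qat_common, which this hunk does not show:

	#include <linux/device.h>
	#include <linux/firmware.h>

	/* Hypothetical sketch, not part of this patch: the firmware-loader
	 * round trip enabled by "select FW_LOADER".  The file name matches
	 * ADF_C3XXX_FW defined in adf_c3xxx_hw_data.h below. */
	static int qat_fw_sketch(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, "qat_c3xxx.bin", dev);
		if (ret)
			return ret;	/* image missing from /lib/firmware */

		/* ... hand fw->data (fw->size bytes) to the accel engines ... */

		release_firmware(fw);
		return 0;
	}
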
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/Makefile
new file mode 100644
index 0000000..7dd15e7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/Makefile
new file mode 100644
index 0000000..8f5fd48
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
+qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
new file mode 100644
index 0000000..6bc68bc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -0,0 +1,239 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_c3xxx_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const u32 thrd_to_arb_map_6_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c3xxx_class = {
+	.name = ADF_C3XXX_DEVICE_NAME,
+	.type = DEV_C3XXX,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET &
+		ADF_C3XXX_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return 0;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int aes = get_num_aes(self);
+
+	if (aes == 6)
+		return DEV_SKU_4;
+
+	return DEV_SKU_UNKNOWN;
+}
+
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_6_me_sku;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU\n");
+		*arb_map_config = NULL;
+	}
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C3XXX_PF2VF_OFFSET(i);
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C3XXX_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
+		val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
+		val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
+		val |= ADF_C3XXX_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
+		val |= ADF_C3XXX_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET,
+		   ADF_C3XXX_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET,
+		   ADF_C3XXX_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c3xxx_class;
+	hw_data->instance_id = c3xxx_class.instances++;
+	hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_C3XXX_FW;
+	hw_data->fw_mmp_name = ADF_C3XXX_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->reset_device = adf_reset_flr;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
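
For reference, the counting loops in get_num_accels() and get_num_aes() above are plain population counts over the fuse-derived masks. A condensed equivalent using the kernel's bit helpers, shown only as a sketch of what the loops compute, not as a proposed change:

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Sketch only: same result as the counting loops above.  max_units
	 * would be one of the ADF_C3XXX_MAX_* constants; the GENMASK() trim
	 * mirrors the loop bound. */
	static u32 count_units(u32 mask, unsigned int max_units)
	{
		return hweight32(mask & GENMASK(max_units - 1, 0));
	}

For example, count_units(self->accel_mask, ADF_C3XXX_MAX_ACCELERATORS) would match get_num_accels().
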
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
new file mode 100644
index 0000000..afc9a0a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C3XXX_HW_DATA_H_
+#define ADF_C3XXX_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C3XXX_PMISC_BAR 0
+#define ADF_C3XXX_ETR_BAR 1
+#define ADF_C3XXX_RX_RINGS_OFFSET 8
+#define ADF_C3XXX_TX_RINGS_MASK 0xFF
+#define ADF_C3XXX_MAX_ACCELERATORS 3
+#define ADF_C3XXX_MAX_ACCELENGINES 6
+#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
+#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
+#define ADF_C3XXX_ETR_MAX_BANKS 16
+#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_C3XXX_SMIA0_MASK 0xFFFF
+#define ADF_C3XXX_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
+
+#define ADF_C3XXX_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_C3XXX_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+
+/* Firmware Binary */
+#define ADF_C3XXX_FW "qat_c3xxx.bin"
+#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+#endif
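
The per-unit CSR macros above are plain offset arithmetic; the standalone user-space sketch below shows how two of them expand. One nit worth knowing: the AE macros use their argument unparenthesized (i * 0x1000 ...), so they are only safe with a plain identifier or constant argument — every caller in this patch passes the loop counter i.

	/* Standalone sketch of how the per-unit CSR macros expand;
	 * pure arithmetic, no hardware access.  (The argument is
	 * parenthesized here, unlike the kernel header above.) */
	#include <stdio.h>

	#define AE_CTX_ENABLES(i)	((i) * 0x1000 + 0x20818)
	#define PF2VF_OFFSET(i)		(0x3A000 + 0x280 + ((i) * 0x04))

	int main(void)
	{
		/* AE 2: 2 * 0x1000 + 0x20818 = 0x22818 */
		printf("AE2 CTX_ENABLES: 0x%x\n", AE_CTX_ENABLES(2));
		/* VF 3: 0x3A000 + 0x280 + 0xC = 0x3a28c */
		printf("VF3 PF2VF:       0x%x\n", PF2VF_OFFSET(3));
		return 0;
	}
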
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_drv.c
new file mode 100644
index 0000000..7c470ae
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -0,0 +1,333 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxx_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C3XXX_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C3XXX_PCI_DEVICE_ID:
+			adf_clean_hw_data_c3xxx(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C3XXX_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table.
+	 * This should be called before adf_cleanup_accel is called */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c3xxx(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
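
The DMA-mask block in adf_probe() above implements the usual 64-bit-first, 32-bit-fallback policy with paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls. For reference only, the same policy condensed with the combined helper that also exists in this kernel version — a sketch, not a change to the patch:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/pci.h>

	/* Sketch: 64-bit streaming+coherent masks, falling back to 32-bit,
	 * matching the policy of the probe code above. */
	static int qat_set_dma_sketch(struct pci_dev *pdev)
	{
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;	/* 64-bit DMA usable */
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			return 0;	/* fell back to 32-bit */
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		return -EFAULT;
	}
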
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/Makefile
new file mode 100644
index 0000000..16d178e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
+qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
new file mode 100644
index 0000000..d2d0ae4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -0,0 +1,150 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static struct adf_hw_device_class c3xxxiov_class = {
+	.name = ADF_C3XXXVF_DEVICE_NAME,
+	.type = DEV_C3XXXVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_C3XXXIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_C3XXXIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C3XXXIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C3XXXIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C3XXXIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c3xxxiov_class;
+	hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
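
A VF has no fuses, no admin interface and no arbiter of its own, which is why about half of the ops above are wired to the two no-op stubs. The pattern in isolation, as a self-contained sketch with hypothetical names:

	/* Sketch (hypothetical names): one int-returning and one void no-op
	 * cover every op a VF has no hardware for, so shared callers never
	 * need "is this a VF?" checks. */
	struct example_ops {
		int  (*init_admin)(void *ctx);
		void (*exit_arb)(void *ctx);
	};

	static int  noop_int(void *ctx)  { return 0; }	/* succeed, do nothing */
	static void noop_void(void *ctx) { }

	static const struct example_ops vf_example_ops = {
		.init_admin = noop_int,
		.exit_arb   = noop_void,
	};
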
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
new file mode 100644
index 0000000..934f216
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -0,0 +1,64 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C3XXXVF_HW_DATA_H_
+#define ADF_C3XXXVF_HW_DATA_H_
+
+#define ADF_C3XXXIOV_PMISC_BAR 1
+#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
+#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
+#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
+#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
+#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
+#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
+#define ADF_C3XXXIOV_ETR_BAR 0
+#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
+#define ADF_C3XXXIOV_PF2VF_OFFSET	0x200
+#define ADF_C3XXXIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
new file mode 100644
index 0000000..613c7d5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C3XXXVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C3XXXIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C3XXXIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
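
Unlike the PF probe, this probe initializes vf.iov_msg_completion: VF2PF messaging is a strict request/response exchange whose response arrives in interrupt context. A sketch of how such a completion is typically driven (hypothetical names; the real send/ack path lives in qat_common, which this patch does not show):

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct vf_ctx {
		struct completion iov_msg_completion;
	};

	/* Sketch: one VF2PF request/response round trip. */
	static int vf2pf_roundtrip(struct vf_ctx *vf)
	{
		reinit_completion(&vf->iov_msg_completion);
		/* ... write the request to the PF2VF doorbell CSR here ... */
		if (!wait_for_completion_timeout(&vf->iov_msg_completion,
						 msecs_to_jiffies(100)))
			return -ETIMEDOUT;	/* PF never answered */
		return 0;	/* the PF-response IRQ handler called complete() */
	}
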
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/Makefile
new file mode 100644
index 0000000..bd75ace
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
+qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
new file mode 100644
index 0000000..618cec3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -0,0 +1,249 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_c62x_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const u32 thrd_to_arb_map_8_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0
+};
+
+static const u32 thrd_to_arb_map_10_me_sku[] = {
+	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+	0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c62x_class = {
+	.name = ADF_C62X_DEVICE_NAME,
+	.type = DEV_C62X,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET &
+			  ADF_C62X_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return (~fuse) & ADF_C62X_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	u32 i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62X_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int aes = get_num_aes(self);
+
+	if (aes == 8)
+		return DEV_SKU_2;
+	else if (aes == 10)
+		return DEV_SKU_4;
+
+	return DEV_SKU_UNKNOWN;
+}
+
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_2:
+		*arb_map_config = thrd_to_arb_map_8_me_sku;
+		break;
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_10_me_sku;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU\n");
+		*arb_map_config = NULL;
+	}
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C62X_PF2VF_OFFSET(i);
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C62X_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
+		val |= ADF_C62X_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
+		val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
+		val |= ADF_C62X_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
+		val |= ADF_C62X_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET,
+		   ADF_C62X_SMIA0_MASK);
+	ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET,
+		   ADF_C62X_SMIA1_MASK);
+}
+
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c62x_class;
+	hw_data->instance_id = c62x_class.instances++;
+	hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_C62X_FW;
+	hw_data->fw_mmp_name = ADF_C62X_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->reset_device = adf_reset_flr;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
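
Compared with the single-SKU C3xxx earlier in this patch, the C62x code must choose between two arbiter tables: get_sku() keys off the number of enabled engines and adf_get_arbiter_mapping() dispatches on the result. The combined decision, distilled into one pure function as a sketch:

	/* Sketch: 8 enabled engines means DEV_SKU_2 and the 8-ME arbiter
	 * table, 10 means DEV_SKU_4 and the 10-ME table. */
	static const u32 *c62x_arb_table_sketch(u32 num_aes)
	{
		switch (num_aes) {
		case 8:
			return thrd_to_arb_map_8_me_sku;
		case 10:
			return thrd_to_arb_map_10_me_sku;
		default:
			return NULL;	/* unknown SKU: caller must bail out */
		}
	}
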
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
new file mode 100644
index 0000000..17a8a32
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -0,0 +1,84 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C62X_HW_DATA_H_
+#define ADF_C62X_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C62X_SRAM_BAR 0
+#define ADF_C62X_PMISC_BAR 1
+#define ADF_C62X_ETR_BAR 2
+#define ADF_C62X_RX_RINGS_OFFSET 8
+#define ADF_C62X_TX_RINGS_MASK 0xFF
+#define ADF_C62X_MAX_ACCELERATORS 5
+#define ADF_C62X_MAX_ACCELENGINES 10
+#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
+#define ADF_C62X_ACCELERATORS_MASK 0x1F
+#define ADF_C62X_ACCELENGINES_MASK 0x3FF
+#define ADF_C62X_ETR_MAX_BANKS 16
+#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_C62X_SMIA0_MASK 0xFFFF
+#define ADF_C62X_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_C62X_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_C62X_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_C62X_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_C62X_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_C62X_ERRSSMSH_EN BIT(3)
+
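+/* Per-VF PF2VF doorbell and VF interrupt mask CSRs, indexed by VF number */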
+#define ADF_C62X_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_C62X_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+
+/* Firmware Binary */
+#define ADF_C62X_FW "qat_c62x.bin"
+#define ADF_C62X_MMP "qat_c62x_mmp.bin"
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_drv.c
new file mode 100644
index 0000000..cb11d85
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -0,0 +1,333 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62x_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C62X_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C62X_PCI_DEVICE_ID:
+			adf_clean_hw_data_c62x(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C62X_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add the accel device to the accel table.
+	 * This must be done before adf_cleanup_accel() is called. */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c62x(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Set DMA mask: prefer 64-bit addressing, fall back to 32-bit */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		}
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
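+	/* If the FUSECTL fuse bit is set, the SRAM BAR is not exposed;
+	 * leave pci_bars[0] unmapped and start filling at index 1. */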
+	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/Makefile
new file mode 100644
index 0000000..ecd708c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
+qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
new file mode 100644
index 0000000..38e4bc0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -0,0 +1,150 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_c62xvf_hw_data.h"
+
+static struct adf_hw_device_class c62xiov_class = {
+	.name = ADF_C62XVF_DEVICE_NAME,
+	.type = DEV_C62XVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_C62XIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_C62XIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_C62XIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_C62XIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_C62XIOV_VINTMSK_OFFSET;
+}
+
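+/* Admin comms, arbitration and error correction are owned by the PF,
+ * so the corresponding VF callbacks below are no-ops. */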
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &c62xiov_class;
+	hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
new file mode 100644
index 0000000..a28d83e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -0,0 +1,64 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_C62XVF_HW_DATA_H_
+#define ADF_C62XVF_HW_DATA_H_
+
+#define ADF_C62XIOV_PMISC_BAR 1
+#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
+#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
+#define ADF_C62XIOV_MAX_ACCELERATORS 1
+#define ADF_C62XIOV_MAX_ACCELENGINES 1
+#define ADF_C62XIOV_RX_RINGS_OFFSET 8
+#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
+#define ADF_C62XIOV_ETR_BAR 0
+#define ADF_C62XIOV_ETR_MAX_BANKS 1
+#define ADF_C62XIOV_PF2VF_OFFSET	0x200
+#define ADF_C62XIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_drv.c
new file mode 100644
index 0000000..278452b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62xvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_C62XVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_C62XIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_c62xiov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
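+	/* Resolve the owning PF through the PCI physfn back-pointer */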
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_C62XIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_c62xiov(accel_dev->hw_device);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Set DMA mask: prefer 64-bit addressing, fall back to 32-bit */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		}
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/.gitignore b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/.gitignore
new file mode 100644
index 0000000..ee32837
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/.gitignore
@@ -0,0 +1 @@
+*-asn1.[ch]
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/Makefile
new file mode 100644
index 0000000..47a8e3d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+	adf_isr.o \
+	adf_ctl_drv.o \
+	adf_dev_mgr.o \
+	adf_init.o \
+	adf_accel_engine.o \
+	adf_aer.o \
+	adf_transport.o \
+	adf_admin.o \
+	adf_hw_arbiter.o \
+	qat_crypto.o \
+	qat_algs.o \
+	qat_asym_algs.o \
+	qat_uclo.o \
+	qat_hal.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+			       adf_vf2pf_msg.o adf_vf_isr.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_accel_devices.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_accel_devices.h
new file mode 100644
index 0000000..33f0a62
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -0,0 +1,258 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include "adf_cfg_common.h"
+
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
+#define ADF_C3XXX_DEVICE_NAME "c3xxx"
+#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
+#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
+#define ADF_C62X_PCI_DEVICE_ID 0x37c8
+#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
+#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
+#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
+#define ADF_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_DEVICE_FUSECTL_OFFSET 0x40
+#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+	ADF_ACCEL_CAPABILITIES_NULL = 0,
+	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+	resource_size_t base_addr;
+	void __iomem *virt_addr;
+	resource_size_t size;
+} __packed;
+
+struct adf_accel_msix {
+	struct msix_entry *entries;
+	char **names;
+	u32 num_entries;
+} __packed;
+
+struct adf_accel_pci {
+	struct pci_dev *pci_dev;
+	struct adf_accel_msix msix_entries;
+	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+	uint8_t revid;
+	uint8_t sku;
+} __packed;
+
+enum dev_state {
+	DEV_DOWN = 0,
+	DEV_UP
+};
+
+enum dev_sku_info {
+	DEV_SKU_1 = 0,
+	DEV_SKU_2,
+	DEV_SKU_3,
+	DEV_SKU_4,
+	DEV_SKU_VF,
+	DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+	switch (info) {
+	case DEV_SKU_1:
+		return "SKU1";
+	case DEV_SKU_2:
+		return "SKU2";
+	case DEV_SKU_3:
+		return "SKU3";
+	case DEV_SKU_4:
+		return "SKU4";
+	case DEV_SKU_VF:
+		return "SKUVF";
+	case DEV_SKU_UNKNOWN:
+	default:
+		break;
+	}
+	return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+	const char *name;
+	const enum adf_device_type type;
+	uint32_t instances;
+} __packed;
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
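+/* Per-device ops and capability table; each device flavour (PF or VF)
+ * populates it from its adf_init_hw_data_*() routine. */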
+struct adf_hw_device_data {
+	struct adf_hw_device_class *dev_class;
+	uint32_t (*get_accel_mask)(uint32_t fuse);
+	uint32_t (*get_ae_mask)(uint32_t fuse);
+	uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
+	uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
+	uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+	uint32_t (*get_pf2vf_offset)(uint32_t i);
+	uint32_t (*get_vintmsk_offset)(uint32_t i);
+	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+	void (*free_irq)(struct adf_accel_dev *accel_dev);
+	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+	int (*init_arb)(struct adf_accel_dev *accel_dev);
+	void (*exit_arb)(struct adf_accel_dev *accel_dev);
+	void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+				const uint32_t **cfg);
+	void (*disable_iov)(struct adf_accel_dev *accel_dev);
+	void (*enable_ints)(struct adf_accel_dev *accel_dev);
+	int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+	void (*reset_device)(struct adf_accel_dev *accel_dev);
+	const char *fw_name;
+	const char *fw_mmp_name;
+	uint32_t fuses;
+	uint32_t accel_capabilities_mask;
+	uint32_t instance_id;
+	uint16_t accel_mask;
+	uint16_t ae_mask;
+	uint16_t tx_rings_mask;
+	uint8_t tx_rx_gap;
+	uint8_t num_banks;
+	uint8_t num_accel;
+	uint8_t num_logical_accel;
+	uint8_t num_engines;
+	uint8_t min_iov_compat_ver;
+} __packed;
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+	__raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+	struct icp_qat_fw_loader_handle *fw_loader;
+	const struct firmware *uof_fw;
+	const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+	struct adf_accel_dev *accel_dev;
+	struct tasklet_struct vf2pf_bh_tasklet;
+	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+	struct ratelimit_state vf2pf_ratelimit;
+	u32 vf_nr;
+	bool init;
+};
+
+struct adf_accel_dev {
+	struct adf_etr_data *transport;
+	struct adf_hw_device_data *hw_device;
+	struct adf_cfg_device_data *cfg;
+	struct adf_fw_loader_data *fw_loader;
+	struct adf_admin_comms *admin;
+	struct list_head crypto_list;
+	unsigned long status;
+	atomic_t ref_count;
+	struct dentry *debugfs_dir;
+	struct list_head list;
+	struct module *owner;
+	struct adf_accel_pci accel_pci_dev;
+	union {
+		struct {
+			/* vf_info is non-zero when SR-IOV is initialized */
+			struct adf_accel_vf_info *vf_info;
+		} pf;
+		struct {
+			char *irq_name;
+			struct tasklet_struct pf2vf_bh_tasklet;
+			struct mutex vf2pf_lock; /* protect CSR access */
+			struct completion iov_msg_completion;
+			uint8_t compatible;
+			uint8_t pf_version;
+		} vf;
+	};
+	bool is_vf;
+	u32 accel_id;
+} __packed;
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_accel_engine.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_accel_engine.c
new file mode 100644
index 0000000..a42fc42
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -0,0 +1,209 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	void *uof_addr, *mmp_addr;
+	u32 uof_size, mmp_size;
+
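+	/* VF hw_data does not set fw_name, so VFs skip firmware loading */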
+	if (!hw_device->fw_name)
+		return 0;
+
+	if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+			hw_device->fw_mmp_name);
+		return -EFAULT;
+	}
+	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
+			hw_device->fw_name);
+		goto out_err;
+	}
+
+	uof_size = loader_data->uof_fw->size;
+	uof_addr = (void *)loader_data->uof_fw->data;
+	mmp_size = loader_data->mmp_fw->size;
+	mmp_addr = (void *)loader_data->mmp_fw->data;
+	if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
+		goto out_err;
+	}
+	if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
+		goto out_err;
+	}
+	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
+		goto out_err;
+	}
+	return 0;
+
+out_err:
+	adf_ae_fw_release(accel_dev);
+	return -EFAULT;
+}
+
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return;
+
+	qat_uclo_del_uof_obj(loader_data->fw_loader);
+	qat_hal_deinit(loader_data->fw_loader);
+	release_firmware(loader_data->uof_fw);
+	release_firmware(loader_data->mmp_fw);
+	loader_data->uof_fw = NULL;
+	loader_data->mmp_fw = NULL;
+	loader_data->fw_loader = NULL;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+	if (!hw_data->fw_name)
+		return 0;
+
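+	/* Start each engine whose bit is set in the fuse-derived ae_mask;
+	 * the 0xFF context mask starts all contexts on the AE. */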
+	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+		if (hw_data->ae_mask & (1 << ae)) {
+			qat_hal_start(loader_data->fw_loader, ae, 0xFF);
+			ae_ctr++;
+		}
+	}
+	dev_info(&GET_DEV(accel_dev),
+		 "qat_dev%d started %d acceleration engines\n",
+		 accel_dev->accel_id, ae_ctr);
+	return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+	if (!hw_data->fw_name)
+		return 0;
+
+	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+		if (hw_data->ae_mask & (1 << ae)) {
+			qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+			ae_ctr++;
+		}
+	}
+	dev_info(&GET_DEV(accel_dev),
+		 "qat_dev%d stopped %d acceleration engines\n",
+		 accel_dev->accel_id, ae_ctr);
+	return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+	qat_hal_reset(loader_data->fw_loader);
+	if (qat_hal_clr_reset(loader_data->fw_loader))
+		return -EFAULT;
+
+	return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
+
+	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+	if (!loader_data)
+		return -ENOMEM;
+
+	accel_dev->fw_loader = loader_data;
+	if (qat_hal_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
+		kfree(loader_data);
+		return -EFAULT;
+	}
+	if (adf_ae_reset(accel_dev, 0)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
+		qat_hal_deinit(loader_data->fw_loader);
+		kfree(loader_data);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
+
+	qat_hal_deinit(loader_data->fw_loader);
+	kfree(accel_dev->fw_loader);
+	accel_dev->fw_loader = NULL;
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_admin.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644
index 0000000..3744b22
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_admin.c
@@ -0,0 +1,296 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
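+/*
+ * 1KB constants table handed to the firmware via ICP_QAT_FW_CONSTANTS_CFG;
+ * the embedded values appear to include the standard MD5/SHA-1/SHA-2
+ * initial hash values.
+ */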
+static const u8 const_tab[1024] __aligned(1024) = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+	dma_addr_t phy_addr;
+	dma_addr_t const_tbl_addr;
+	void *virt_addr;
+	void *virt_tbl_addr;
+	void __iomem *mailbox_addr;
+	struct mutex lock;	/* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+				  void *in, void *out)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+	int offset = ae * ADF_ADMINMSG_LEN * 2;
+	void __iomem *mailbox = admin->mailbox_addr;
+	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+	int times, received;
+
+	mutex_lock(&admin->lock);
+
+	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+		mutex_unlock(&admin->lock);
+		return -EAGAIN;
+	}
+
+	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+	ADF_CSR_WR(mailbox, mb_offset, 1);
+	received = 0;
+	for (times = 0; times < 50; times++) {
+		msleep(20);
+		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+			received = 1;
+			break;
+		}
+	}
+	if (received)
+		memcpy(out, admin->virt_addr + offset +
+		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+	else
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send admin msg to accelerator\n");
+
+	mutex_unlock(&admin->lock);
+	return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct icp_qat_fw_init_admin_req req;
+	struct icp_qat_fw_init_admin_resp resp;
+	int i;
+
+	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+	req.init_admin_cmd_id = cmd;
+
+	if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+		req.init_cfg_sz = 1024;
+		req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+	}
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+		    resp.init_resp_hdr.status)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * adf_send_admin_init() - Send init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends the admin init message to the FW and then downloads the
+ * constants table to the device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+	int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+	if (ret)
+		return ret;
+	return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
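+
+/*
+ * Illustrative bring-up order (a sketch; error handling elided): a device
+ * specific driver sets up the admin interface first and only then sends
+ * the init message, e.g.
+ *
+ *	if (adf_init_admin_comms(accel_dev))
+ *		return -EFAULT;
+ *	...
+ *	if (adf_send_admin_init(accel_dev))
+ *		return -EFAULT;
+ */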
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *csr = pmisc->virt_addr;
+	void __iomem *mailbox = (void __iomem *)((uintptr_t)csr +
+				 ADF_DH895XCC_MAILBOX_BASE_OFFSET);
+	u64 reg_val;
+
+	admin = kzalloc_node(sizeof(*admin), GFP_KERNEL,
+			     dev_to_node(&GET_DEV(accel_dev)));
+	if (!admin)
+		return -ENOMEM;
+	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+					       &admin->phy_addr, GFP_KERNEL);
+	if (!admin->virt_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+		kfree(admin);
+		return -ENOMEM;
+	}
+
+	admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
+						   PAGE_SIZE,
+						   &admin->const_tbl_addr,
+						   GFP_KERNEL);
+	if (!admin->virt_tbl_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+		kfree(admin);
+		return -ENOMEM;
+	}
+
+	memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
+	reg_val = (u64)admin->phy_addr;
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+	mutex_init(&admin->lock);
+	admin->mailbox_addr = mailbox;
+	accel_dev->admin = admin;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+
+	if (!admin)
+		return;
+
+	if (admin->virt_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+	if (admin->virt_tbl_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_tbl_addr, admin->const_tbl_addr);
+
+	mutex_destroy(&admin->lock);
+	kfree(admin);
+	accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_aer.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_aer.c
new file mode 100644
index 0000000..da8a2d3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_aer.c
@@ -0,0 +1,271 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+					   pci_channel_state_t state)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (state == pci_channel_io_perm_failure) {
+		dev_err(&pdev->dev, "Can't recover from device error\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+	int mode;
+	struct adf_accel_dev *accel_dev;
+	struct completion compl;
+	struct work_struct reset_work;
+};
+
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	struct pci_dev *parent = pdev->bus->self;
+	uint16_t bridge_ctl = 0;
+
+	if (!parent)
+		parent = pdev;
+
+	if (!pci_wait_for_pending_transaction(pdev))
+		dev_info(&GET_DEV(accel_dev),
+			 "Transaction still in progress. Proceeding\n");
+
+	dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
+	pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+	bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+	bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+	msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+	pcie_flr(accel_to_pci_dev(accel_dev));
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	if (hw_device->reset_device) {
+		dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+			 accel_dev->accel_id);
+		hw_device->reset_device(accel_dev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+	}
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+	struct adf_reset_dev_data *reset_data =
+		  container_of(work, struct adf_reset_dev_data, reset_work);
+	struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+	adf_dev_restarting_notify(accel_dev);
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
+		/* The device hung and we can't restart it, so stop here */
+		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+		/*
+		 * In sync mode the caller still owns reset_data; it will
+		 * time out and deliberately leak it, so free it here only
+		 * in async mode to avoid a double free.
+		 */
+		if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+			kfree(reset_data);
+		WARN(1, "QAT: device restart failed. Device is unusable\n");
+		return;
+	}
+	adf_dev_restarted_notify(accel_dev);
+	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+	/* The dev is back alive. Notify the caller if in sync mode */
+	if (reset_data->mode == ADF_DEV_RESET_SYNC)
+		complete(&reset_data->compl);
+	else
+		kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+				      enum adf_dev_reset_mode mode)
+{
+	struct adf_reset_dev_data *reset_data;
+
+	if (!adf_dev_started(accel_dev) ||
+	    test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		return 0;
+
+	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+	reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+	if (!reset_data)
+		return -ENOMEM;
+	reset_data->accel_dev = accel_dev;
+	init_completion(&reset_data->compl);
+	reset_data->mode = mode;
+	INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+	queue_work(device_reset_wq, &reset_data->reset_work);
+
+	/* If in sync mode wait for the result */
+	if (mode == ADF_DEV_RESET_SYNC) {
+		int ret = 0;
+		/* Maximum device reset time is 10 seconds */
+		unsigned long wait_jiffies = msecs_to_jiffies(10000);
+		unsigned long timeout = wait_for_completion_timeout(
+				   &reset_data->compl, wait_jiffies);
+		if (!timeout) {
+			dev_err(&GET_DEV(accel_dev),
+				"Reset device timeout expired\n");
+			ret = -EFAULT;
+			/*
+			 * Deliberately leak reset_data: the worker may
+			 * still be running and will touch it, so freeing
+			 * it here would risk a use-after-free.
+			 */
+		} else {
+			kfree(reset_data);
+		}
+		return ret;
+	}
+	return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Can't find acceleration device\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+	dev_info(&pdev->dev, "Acceleration driver reset completed\n");
+	dev_info(&pdev->dev, "Device is up and running\n");
+}
+
+static const struct pci_error_handlers adf_err_handler = {
+	.error_detected = adf_error_detected,
+	.slot_reset = adf_slot_reset,
+	.resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ * @adf:        PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	adf->err_handler = &adf_err_handler;
+	pci_enable_pcie_error_reporting(pdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
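+
+/*
+ * Illustrative pairing (a sketch): a device specific driver is expected to
+ * call adf_enable_aer(accel_dev, &driver) from its probe path and
+ * adf_disable_aer(accel_dev) from its remove path, where &driver is that
+ * driver's own struct pci_driver.
+ */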
+
+int adf_init_aer(void)
+{
+	device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+					  WQ_MEM_RECLAIM, 0);
+	return !device_reset_wq ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+	if (device_reset_wq)
+		destroy_workqueue(device_reset_wq);
+	device_reset_wq = NULL;
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg.c
new file mode 100644
index 0000000..d087979
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -0,0 +1,367 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+	mutex_lock(&qat_cfg_read_lock);
+	return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+	struct list_head *list;
+	struct adf_cfg_section *sec =
+				list_entry(v, struct adf_cfg_section, list);
+
+	seq_printf(sfile, "[%s]\n", sec->name);
+	list_for_each(list, &sec->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+	}
+	return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+	return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+	.start = qat_dev_cfg_start,
+	.next = qat_dev_cfg_next,
+	.stop = qat_dev_cfg_stop,
+	.show = qat_dev_cfg_show
+};
+
+static int qat_dev_cfg_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &qat_dev_cfg_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations qat_dev_cfg_fops = {
+	.open = qat_dev_cfg_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
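+
+/*
+ * The seq_file above renders the table in a simple INI-like form, one
+ * section header followed by its key = value pairs, e.g. (names
+ * illustrative):
+ *
+ *	[GENERAL]
+ *	NumberCyInstances = 1
+ */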
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data;
+
+	dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+	if (!dev_cfg_data)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+	init_rwsem(&dev_cfg_data->lock);
+	accel_dev->cfg = dev_cfg_data;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+						  accel_dev->debugfs_dir,
+						  dev_cfg_data,
+						  &qat_dev_cfg_fops);
+	if (!dev_cfg_data->debug) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to create qat cfg debugfs entry.\n");
+		kfree(dev_cfg_data);
+		accel_dev->cfg = NULL;
+		return -EFAULT;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+	clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+	if (!dev_cfg_data)
+		return;
+
+	down_write(&dev_cfg_data->lock);
+	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+	up_write(&dev_cfg_data->lock);
+	debugfs_remove(dev_cfg_data->debug);
+	kfree(dev_cfg_data);
+	accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+			       struct adf_cfg_section *sec)
+{
+	list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+	struct list_head *list_ptr, *tmp;
+
+	list_for_each_prev_safe(list_ptr, tmp, head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list_ptr, struct adf_cfg_key_val, list);
+		list_del(list_ptr);
+		kfree(ptr);
+	}
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+	struct adf_cfg_section *ptr;
+	struct list_head *list, *tmp;
+
+	list_for_each_prev_safe(list, tmp, head) {
+		ptr = list_entry(list, struct adf_cfg_section, list);
+		adf_cfg_keyval_del_all(&ptr->param_head);
+		list_del(list);
+		kfree(ptr);
+	}
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+						      const char *key)
+{
+	struct list_head *list;
+
+	list_for_each(list, &s->param_head) {
+		struct adf_cfg_key_val *ptr =
+			list_entry(list, struct adf_cfg_key_val, list);
+		if (!strcmp(ptr->key, key))
+			return ptr;
+	}
+	return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+						const char *sec_name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct list_head *list;
+
+	list_for_each(list, &cfg->sec_list) {
+		struct adf_cfg_section *ptr =
+			list_entry(list, struct adf_cfg_section, list);
+		if (!strcmp(ptr->name, sec_name))
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+			       const char *sec_name,
+			       const char *key_name,
+			       char *val)
+{
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+	struct adf_cfg_key_val *keyval = NULL;
+
+	if (sec)
+		keyval = adf_cfg_key_value_find(sec, key_name);
+	if (keyval) {
+		memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+		return 0;
+	}
+	return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key. For ADF_DEC this must point to a long,
+ *	for ADF_STR to a NUL-terminated string; for ADF_HEX the pointer
+ *	value of @val itself is formatted as the hex number.
+ * @type: Type - string, decimal or hex
+ *
+ * Function adds a configuration key - value entry in the appropriate section
+ * in the given acceleration device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+				const char *section_name,
+				const char *key, const void *val,
+				enum adf_cfg_val_type type)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct adf_cfg_key_val *key_val;
+	struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+							   section_name);
+	if (!section)
+		return -EFAULT;
+
+	key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+	if (!key_val)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&key_val->list);
+	strlcpy(key_val->key, key, sizeof(key_val->key));
+
+	if (type == ADF_DEC) {
+		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+			 "%ld", (*((long *)val)));
+	} else if (type == ADF_STR) {
+		strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+	} else if (type == ADF_HEX) {
+		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+			 "0x%lx", (unsigned long)val);
+	} else {
+		dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
+		kfree(key_val);
+		return -1;
+	}
+	key_val->type = type;
+	down_write(&cfg->lock);
+	adf_cfg_keyval_add(key_val, section);
+	up_write(&cfg->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
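+
+/*
+ * Illustrative use (a sketch; the section and key names come from
+ * adf_cfg_strings.h, the value is hypothetical):
+ *
+ *	unsigned long val = 1;
+ *
+ *	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+ *		return -EFAULT;
+ *	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ *					ADF_NUM_CY, &val, ADF_DEC))
+ *		return -EFAULT;
+ */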
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds configuration section where key - value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+	if (sec)
+		return 0;
+
+	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+
+	strlcpy(sec->name, name, sizeof(sec->name));
+	INIT_LIST_HEAD(&sec->param_head);
+	down_write(&cfg->lock);
+	list_add_tail(&sec->list, &cfg->sec_list);
+	up_write(&cfg->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+			    const char *section, const char *name,
+			    char *value)
+{
+	struct adf_cfg_device_data *cfg = accel_dev->cfg;
+	int ret;
+
+	down_read(&cfg->lock);
+	ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+	up_read(&cfg->lock);
+	return ret;
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg.h
new file mode 100644
index 0000000..6a9c6f6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -0,0 +1,87 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	enum adf_cfg_val_type type;
+	struct list_head list;
+};
+
+struct adf_cfg_section {
+	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+	struct list_head list;
+	struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+	struct list_head sec_list;
+	struct dentry *debug;
+	struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+				const char *section_name,
+				const char *key, const void *val,
+				enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+			    const char *section, const char *name, char *value);
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_common.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_common.h
new file mode 100644
index 0000000..1211261
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -0,0 +1,106 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES (32 * 32)
+#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+
+enum adf_cfg_val_type {
+	ADF_DEC,
+	ADF_HEX,
+	ADF_STR
+};
+
+enum adf_device_type {
+	DEV_UNKNOWN = 0,
+	DEV_DH895XCC,
+	DEV_DH895XCCVF,
+	DEV_C62X,
+	DEV_C62XVF,
+	DEV_C3XXX,
+	DEV_C3XXXVF
+};
+
+struct adf_dev_status_info {
+	enum adf_device_type type;
+	u32 accel_id;
+	u32 instance_id;
+	uint8_t num_ae;
+	uint8_t num_accel;
+	uint8_t num_logical_accel;
+	uint8_t banks_per_accel;
+	uint8_t state;
+	uint8_t bus;
+	uint8_t dev;
+	uint8_t fun;
+	char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+		struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
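+
+/*
+ * Illustrative user space call (a sketch; assumes the usual udev naming of
+ * the control node created by the adf_ctl driver):
+ *
+ *	struct adf_user_cfg_ctl_data data = { .device_id = 0 };
+ *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
+ *
+ *	if (fd >= 0)
+ *		ioctl(fd, IOCTL_START_ACCEL_DEV, &data);
+ */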
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_strings.h
new file mode 100644
index 0000000..7632ed0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -0,0 +1,81 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_BANK_NUM "BankNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+	ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
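+
+/*
+ * The "%d" macros above are printf-style formats keyed by bank (or
+ * accelerator) number, e.g.
+ *
+ *	snprintf(key, sizeof(key), ADF_ETRMGR_CORE_AFFINITY_FORMAT, 0);
+ *
+ * produces the key "Bank0CoreAffinity".
+ */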
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_user.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_user.h
new file mode 100644
index 0000000..b5484bf
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -0,0 +1,82 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
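+/*
+ * These structures cross the user/kernel boundary via the control device
+ * ioctls; the pointer/u64 unions keep the layout identical regardless of
+ * user space pointer size, and __packed removes compiler-dependent padding.
+ */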
+struct adf_user_cfg_key_val {
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	union {
+		struct adf_user_cfg_key_val *next;
+		uint64_t padding3;
+	};
+	enum adf_cfg_val_type type;
+} __packed;
+
+struct adf_user_cfg_section {
+	char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+	union {
+		struct adf_user_cfg_key_val *params;
+		uint64_t padding1;
+	};
+	union {
+		struct adf_user_cfg_section *next;
+		uint64_t padding3;
+	};
+} __packed;
+
+struct adf_user_cfg_ctl_data {
+	union {
+		struct adf_user_cfg_section *config_section;
+		uint64_t padding;
+	};
+	uint8_t device_id;
+} __packed;
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_common_drv.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_common_drv.h
new file mode 100644
index 0000000..d78f8d5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -0,0 +1,294 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_MAJOR_VERSION	0
+#define ADF_MINOR_VERSION	6
+#define ADF_BUILD_VERSION	0
+#define ADF_DRV_VERSION		__stringify(ADF_MAJOR_VERSION) "." \
+				__stringify(ADF_MINOR_VERSION) "." \
+				__stringify(ADF_BUILD_VERSION)
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_PF_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+	ADF_DEV_RESET_ASYNC = 0,
+	ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+	ADF_EVENT_INIT = 0,
+	ADF_EVENT_START,
+	ADF_EVENT_STOP,
+	ADF_EVENT_SHUTDOWN,
+	ADF_EVENT_RESTARTING,
+	ADF_EVENT_RESTARTED,
+};
+
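+/*
+ * A service registers its event_hld callback via adf_service_register() and
+ * is then notified of the lifecycle events above; init_status and
+ * start_status are per-device bitmaps recording which devices the service
+ * has initialised and started.
+ */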
+struct service_hndl {
+	int (*event_hld)(struct adf_accel_dev *accel_dev,
+			 enum adf_event event);
+	unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
+	unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
+	char *name;
+	struct list_head list;
+};
+
+static inline int get_current_node(void)
+{
+	return topology_physical_package_id(raw_smp_processor_id());
+}
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_init(struct adf_accel_dev *accel_dev);
+int adf_dev_start(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(uint32_t id);
+void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+void adf_disable_aer(struct adf_accel_dev *accel_dev);
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
+void adf_dev_restore(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+int qat_crypto_dev_config(struct adf_accel_dev *accel_dev);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
+int qat_algs_register(void);
+void qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		   unsigned int ctx_mask);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		  unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+			  unsigned char ae, unsigned int ctx_mask);
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+			    unsigned int ae);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
+			   unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+			    unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+		       unsigned char ae, unsigned int uaddr,
+		       unsigned int words_num, uint64_t *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		     unsigned int uword_addr, unsigned int words_num,
+		     unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+			unsigned char ae,
+			struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned char ctx_mask,
+		     enum icp_qat_uof_regtype reg_type,
+		     unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned char ctx_mask,
+		    unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+		  unsigned char ae, unsigned short lm_addr, unsigned int value);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
+		       int mem_size);
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+		     void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				  uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 uint32_t vf_mask);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
+
+static inline int adf_init_vf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
+#endif
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_ctl_drv.c
new file mode 100644
index 0000000..abc7a7f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -0,0 +1,512 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/crypto.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = adf_ctl_ioctl,
+	.compat_ioctl = adf_ctl_ioctl,
+};
+
+struct adf_ctl_drv_info {
+	unsigned int major;
+	struct cdev drv_cdev;
+	struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adf_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+	device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
+	cdev_del(&adf_ctl_drv.drv_cdev);
+	class_destroy(adf_ctl_drv.drv_class);
+	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+	dev_t dev_id;
+	struct device *drv_device;
+
+	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+		pr_err("QAT: unable to allocate chrdev region\n");
+		return -EFAULT;
+	}
+
+	adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(adf_ctl_drv.drv_class)) {
+		pr_err("QAT: class_create failed for adf_ctl\n");
+		goto err_chrdev_unreg;
+	}
+	adf_ctl_drv.major = MAJOR(dev_id);
+	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
+	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
+		pr_err("QAT: cdev add failed\n");
+		goto err_class_destr;
+	}
+
+	drv_device = device_create(adf_ctl_drv.drv_class, NULL,
+				   MKDEV(adf_ctl_drv.major, 0),
+				   NULL, DEVICE_NAME);
+	if (IS_ERR(drv_device)) {
+		pr_err("QAT: failed to create device\n");
+		goto err_cdev_del;
+	}
+	return 0;
+err_cdev_del:
+	cdev_del(&adf_ctl_drv.drv_cdev);
+err_class_destr:
+	class_destroy(adf_ctl_drv.drv_class);
+err_chrdev_unreg:
+	unregister_chrdev_region(dev_id, 1);
+	return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+				   unsigned long arg)
+{
+	struct adf_user_cfg_ctl_data *cfg_data;
+
+	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+	if (!cfg_data)
+		return -ENOMEM;
+
+	/* Initialize device id to NO DEVICE as 0 is a valid device id */
+	cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+		pr_err("QAT: failed to copy from user cfg_data.\n");
+		kfree(cfg_data);
+		return -EIO;
+	}
+
+	*ctl_data = cfg_data;
+	return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+				  const char *section,
+				  const struct adf_user_cfg_key_val *key_val)
+{
+	if (key_val->type == ADF_HEX) {
+		long *ptr = (long *)key_val->val;
+		long val = *ptr;
+
+		if (adf_cfg_add_key_value_param(accel_dev, section,
+						key_val->key, (void *)val,
+						key_val->type)) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to add hex keyvalue.\n");
+			return -EFAULT;
+		}
+	} else {
+		if (adf_cfg_add_key_value_param(accel_dev, section,
+						key_val->key, key_val->val,
+						key_val->type)) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to add keyvalue.\n");
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+				   struct adf_user_cfg_ctl_data *ctl_data)
+{
+	struct adf_user_cfg_key_val key_val;
+	struct adf_user_cfg_key_val *params_head;
+	struct adf_user_cfg_section section, *section_head;
+
+	section_head = ctl_data->config_section;
+
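+	/*
+	 * Walk the user-space list of sections; each section in turn
+	 * carries its own user-space list of key/value parameters, so
+	 * every node is copied in with copy_from_user() before use.
+	 */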
+	while (section_head) {
+		if (copy_from_user(&section, (void __user *)section_head,
+				   sizeof(*section_head))) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to copy section info\n");
+			goto out_err;
+		}
+
+		if (adf_cfg_section_add(accel_dev, section.name)) {
+			dev_err(&GET_DEV(accel_dev),
+				"failed to add section.\n");
+			goto out_err;
+		}
+
+		params_head = section.params;
+
+		while (params_head) {
+			if (copy_from_user(&key_val, (void __user *)params_head,
+					   sizeof(key_val))) {
+				dev_err(&GET_DEV(accel_dev),
+					"Failed to copy keyvalue.\n");
+				goto out_err;
+			}
+			if (adf_add_key_value_data(accel_dev, section.name,
+						   &key_val)) {
+				goto out_err;
+			}
+			params_head = key_val.next;
+		}
+		section_head = section.next;
+	}
+	return 0;
+out_err:
+	adf_cfg_del_all(accel_dev);
+	return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+				    unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+	struct adf_accel_dev *accel_dev;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+	if (!accel_dev) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (adf_dev_started(accel_dev)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+	struct adf_accel_dev *dev;
+
+	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
+		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+				dev_info(&GET_DEV(dev),
+					 "device qat_dev%d is busy\n",
+					 dev->accel_id);
+				return -EBUSY;
+			}
+		}
+	}
+	return 0;
+}
+
+static void adf_ctl_stop_devices(uint32_t id)
+{
+	struct adf_accel_dev *accel_dev;
+
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			/* First stop all VFs */
+			if (!accel_dev->is_vf)
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
+		}
+	}
+
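+	/* Then stop any remaining (non-VF) devices that are still running */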
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
+		}
+	}
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+				  unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	if (adf_devmgr_verify_id(ctl_data->device_id)) {
+		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+	if (ret)
+		goto out;
+
+	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+		pr_info("QAT: Stopping all acceleration devices.\n");
+	else
+		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+			ctl_data->device_id);
+
+	adf_ctl_stop_devices(ctl_data->device_id);
+
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+				   unsigned long arg)
+{
+	int ret;
+	struct adf_user_cfg_ctl_data *ctl_data;
+	struct adf_accel_dev *accel_dev;
+
+	ret = adf_ctl_alloc_resources(&ctl_data, arg);
+	if (ret)
+		return ret;
+
+	ret = -ENODEV;
+	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+	if (!accel_dev)
+		goto out;
+
+	if (!adf_dev_started(accel_dev)) {
+		dev_info(&GET_DEV(accel_dev),
+			 "Starting acceleration device qat_dev%d.\n",
+			 ctl_data->device_id);
+		ret = adf_dev_init(accel_dev);
+		if (!ret)
+			ret = adf_dev_start(accel_dev);
+	} else {
+		dev_info(&GET_DEV(accel_dev),
+			 "Acceleration device qat_dev%d already started.\n",
+			 ctl_data->device_id);
+	}
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+			ctl_data->device_id);
+		adf_dev_stop(accel_dev);
+		adf_dev_shutdown(accel_dev);
+	}
+out:
+	kfree(ctl_data);
+	return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+					 unsigned long arg)
+{
+	uint32_t num_devices = 0;
+
+	adf_devmgr_get_num_dev(&num_devices);
+	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+				    unsigned long arg)
+{
+	struct adf_hw_device_data *hw_data;
+	struct adf_dev_status_info dev_info;
+	struct adf_accel_dev *accel_dev;
+
+	if (copy_from_user(&dev_info, (void __user *)arg,
+			   sizeof(struct adf_dev_status_info))) {
+		pr_err("QAT: failed to copy from user.\n");
+		return -EFAULT;
+	}
+
+	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+	if (!accel_dev)
+		return -ENODEV;
+
+	hw_data = accel_dev->hw_device;
+	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+	dev_info.num_ae = hw_data->get_num_aes(hw_data);
+	dev_info.num_accel = hw_data->get_num_accels(hw_data);
+	dev_info.num_logical_accel = hw_data->num_logical_accel;
+	dev_info.banks_per_accel = hw_data->num_banks
+					/ hw_data->num_logical_accel;
+	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+	dev_info.instance_id = hw_data->instance_id;
+	dev_info.type = hw_data->dev_class->type;
+	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+	if (copy_to_user((void __user *)arg, &dev_info,
+			 sizeof(struct adf_dev_status_info))) {
+		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&adf_ctl_lock))
+		return -EFAULT;
+
+	switch (cmd) {
+	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+		break;
+
+	case IOCTL_STOP_ACCEL_DEV:
+		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+		break;
+
+	case IOCTL_START_ACCEL_DEV:
+		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+		break;
+
+	case IOCTL_GET_NUM_DEVICES:
+		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+		break;
+
+	case IOCTL_STATUS_ACCEL_DEV:
+		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+		break;
+	default:
+		pr_err("QAT: Invalid ioctl\n");
+		ret = -EFAULT;
+		break;
+	}
+	mutex_unlock(&adf_ctl_lock);
+	return ret;
+}
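+
+/*
+ * Illustrative userspace flow (a sketch, not part of this file's API
+ * definitions -- the ioctl numbers and structs come from adf_cfg_user.h
+ * and adf_cfg_common.h): open the control node created above (typically
+ * /dev/qat_adf_ctl), query the device count, then fetch per-device state:
+ *
+ *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
+ *	uint32_t n;
+ *	ioctl(fd, IOCTL_GET_NUM_DEVICES, &n);
+ *	struct adf_dev_status_info info = { .accel_id = 0 };
+ *	ioctl(fd, IOCTL_STATUS_ACCEL_DEV, &info);
+ */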
+
+static int __init adf_register_ctl_device_driver(void)
+{
+	mutex_init(&adf_ctl_lock);
+
+	if (adf_chr_drv_create())
+		goto err_chr_dev;
+
+	if (adf_init_aer())
+		goto err_aer;
+
+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
+	if (adf_init_vf_wq())
+		goto err_vf_wq;
+
+	if (qat_crypto_register())
+		goto err_crypto_register;
+
+	return 0;
+
+err_crypto_register:
+	adf_exit_vf_wq();
+err_vf_wq:
+	adf_exit_pf_wq();
+err_pf_wq:
+	adf_exit_aer();
+err_aer:
+	adf_chr_drv_destroy();
+err_chr_dev:
+	mutex_destroy(&adf_ctl_lock);
+	return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+	adf_chr_drv_destroy();
+	adf_exit_aer();
+	adf_exit_vf_wq();
+	adf_exit_pf_wq();
+	qat_crypto_unregister();
+	adf_clean_vf_map(false);
+	mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS_CRYPTO("intel_qat");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_dev_mgr.c
new file mode 100644
index 0000000..8afac52
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -0,0 +1,496 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
+static DEFINE_MUTEX(table_lock);
+static uint32_t num_devices;
+static u8 id_map[ADF_MAX_DEVICES];
+
+struct vf_id_map {
+	u32 bdf;
+	u32 id;
+	u32 fake_id;
+	bool attached;
+	struct list_head list;
+};
+
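+/*
+ * Derive a VF index from the VF's PCI slot/function: with eight
+ * functions per slot and VF slots starting at 1, the expression below
+ * reduces to 8 * (slot - 1) + func.
+ */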
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+
+		if (ptr->bdf == bdf)
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->fake_id == fake)
+			return ptr->id;
+	}
+	return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Clean VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *	for VFs only or for both VFs and PFs
+ *
+ * Function cleans internal ids for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+	struct vf_id_map *map;
+	struct list_head *ptr, *tmp;
+
+	mutex_lock(&table_lock);
+	list_for_each_safe(ptr, tmp, &vfs_table) {
+		map = list_entry(ptr, struct vf_id_map, list);
+		if (map->bdf != -1) {
+			id_map[map->id] = 0;
+			num_devices--;
+		}
+
+		if (vf && map->bdf == -1)
+			continue;
+
+		list_del(ptr);
+		kfree(map);
+	}
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+	struct adf_hw_device_class *class = hw_data->dev_class;
+	struct list_head *itr;
+	int i = 0;
+
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->hw_device->dev_class == class)
+			ptr->hw_device->instance_id = i++;
+
+		if (i == class->instances)
+			break;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
+static unsigned int adf_find_free_id(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ADF_MAX_DEVICES; i++) {
+		if (!id_map[i]) {
+			id_map[i] = 1;
+			return i;
+		}
+	}
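+	/* No free slot: return an out-of-range id that callers reject */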
+	return ADF_MAX_DEVICES + 1;
+}
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
+{
+	struct list_head *itr;
+	int ret = 0;
+
+	if (num_devices == ADF_MAX_DEVICES) {
+		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
+			ADF_MAX_DEVICES);
+		return -EFAULT;
+	}
+
+	mutex_lock(&table_lock);
+	atomic_set(&accel_dev->ref_count, 0);
+
+	/* PF on host or VF on guest */
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		struct vf_id_map *map;
+
+		list_for_each(itr, &accel_table) {
+			struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+			if (ptr == accel_dev) {
+				ret = -EEXIST;
+				goto unlock;
+			}
+		}
+
+		list_add_tail(&accel_dev->list, &accel_table);
+		accel_dev->accel_id = adf_find_free_id();
+		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+			ret = -EFAULT;
+			goto unlock;
+		}
+		num_devices++;
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		map->bdf = ~0;
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
+	} else if (accel_dev->is_vf && pf) {
+		/* VF on host */
+		struct adf_accel_vf_info *vf_info;
+		struct vf_id_map *map;
+
+		vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (map) {
+			struct vf_id_map *next;
+
+			accel_dev->accel_id = map->id;
+			list_add_tail(&accel_dev->list, &accel_table);
+			map->fake_id++;
+			map->attached = true;
+			next = list_next_entry(map, list);
+			while (next && &next->list != &vfs_table) {
+				next->fake_id++;
+				next = list_next_entry(next, list);
+			}
+
+			ret = 0;
+			goto unlock;
+		}
+
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		accel_dev->accel_id = adf_find_free_id();
+		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+			kfree(map);
+			ret = -EFAULT;
+			goto unlock;
+		}
+		num_devices++;
+		list_add_tail(&accel_dev->list, &accel_table);
+		map->bdf = adf_get_vf_num(accel_dev);
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
+	}
+unlock:
+	mutex_unlock(&table_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
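+
+/*
+ * Illustrative call from a device driver's probe path (a sketch with
+ * error handling elided; the VF-on-host case passes the parent PF's
+ * accel_dev instead of NULL):
+ *
+ *	if (adf_devmgr_add_dev(accel_dev, NULL))
+ *		goto out_err;
+ */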
+
+struct list_head *adf_devmgr_get_head(void)
+{
+	return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
+ *
+ * Function removes acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
+{
+	mutex_lock(&table_lock);
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		id_map[accel_dev->accel_id] = 0;
+		num_devices--;
+	} else if (accel_dev->is_vf && pf) {
+		struct vf_id_map *map, *next;
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (!map) {
+			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+			goto unlock;
+		}
+		map->fake_id--;
+		map->attached = false;
+		next = list_next_entry(map, list);
+		while (next && &next->list != &vfs_table) {
+			next->fake_id--;
+			next = list_next_entry(next, list);
+		}
+	}
+unlock:
+	list_del(&accel_dev->list);
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+	struct adf_accel_dev *dev = NULL;
+
+	if (!list_empty(&accel_table))
+		dev = list_first_entry(&accel_table, struct adf_accel_dev,
+				       list);
+	return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to PCI device.
+ *
+ * Function returns acceleration device associated with the given pci device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+	struct list_head *itr;
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+			mutex_unlock(&table_lock);
+			return ptr;
+		}
+	}
+	mutex_unlock(&table_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+{
+	struct list_head *itr;
+	int real_id;
+
+	mutex_lock(&table_lock);
+	real_id = adf_get_vf_real_id(id);
+	if (real_id < 0)
+		goto unlock;
+
+	id = real_id;
+
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+		if (ptr->accel_id == id) {
+			mutex_unlock(&table_lock);
+			return ptr;
+		}
+	}
+unlock:
+	mutex_unlock(&table_lock);
+	return NULL;
+}
+
+int adf_devmgr_verify_id(uint32_t id)
+{
+	if (id == ADF_CFG_ALL_DEVICES)
+		return 0;
+
+	if (adf_devmgr_get_dev_by_id(id))
+		return 0;
+
+	return -ENODEV;
+}
+
+static int adf_get_num_dettached_vfs(void)
+{
+	struct list_head *itr;
+	int vfs = 0;
+
+	mutex_lock(&table_lock);
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->bdf != ~0 && !ptr->attached)
+			vfs++;
+	}
+	mutex_unlock(&table_lock);
+	return vfs;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+	*num = num_devices - adf_get_num_dettached_vfs();
+}
+
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+	return atomic_read(&accel_dev->ref_count) != 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount and, on the first reference taken in
+ * the current period of use, take a reference on the owning module too
+ * so it cannot be unloaded while the device is in use.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when taking the module refcount fails.
+ */
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+		if (!try_module_get(accel_dev->owner))
+			return -EFAULT;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount and, on the last reference dropped in
+ * the current period of use, drop the module reference taken by
+ * adf_dev_get() too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+		module_put(accel_dev->owner);
+}
+EXPORT_SYMBOL_GPL(adf_dev_put);
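+
+/*
+ * Typical usage pattern (a sketch): bracket each period of device use
+ * with a get/put pair so the owning module cannot unload mid-operation:
+ *
+ *	if (adf_dev_get(accel_dev))
+ *		return -EFAULT;
+ *	...submit requests...
+ *	adf_dev_put(accel_dev);
+ */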
+
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_dev_started);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
new file mode 100644
index 0000000..d7dd18d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -0,0 +1,143 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+	(ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+	u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+	u32 arb, i;
+	const u32 *thd_2_arb_cfg;
+
+	/* Service arb configured for 32-byte responses and
+	 * ring flow control check enabled. */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+	/* Setup worker queue registers */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+	/* Map worker threads to service arbiters */
+	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+
+	if (!thd_2_arb_cfg)
+		return -EFAULT;
+
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+				   ring->bank->bank_number,
+				   ring->bank->ring_mask & 0xFF);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr;
+	unsigned int i;
+
+	if (!accel_dev->transport)
+		return;
+
+	csr = accel_dev->transport->banks[0].csr_addr;
+
+	/* Reset arbiter configuration */
+	for (i = 0; i < ADF_ARB_NUM; i++)
+		WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+	/* Shutdown work queue */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+	/* Unmap worker threads from service arbiters */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+	/* Disable arbitration on all rings */
+	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_init.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_init.c
new file mode 100644
index 0000000..26556c7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_init.c
@@ -0,0 +1,384 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+	mutex_lock(&service_lock);
+	list_add(&service->list, &service_table);
+	mutex_unlock(&service_lock);
+}
+
+int adf_service_register(struct service_hndl *service)
+{
+	memset(service->init_status, 0, sizeof(service->init_status));
+	memset(service->start_status, 0, sizeof(service->start_status));
+	adf_service_add(service);
+	return 0;
+}
+
+static void adf_service_remove(struct service_hndl *service)
+{
+	mutex_lock(&service_lock);
+	list_del(&service->list);
+	mutex_unlock(&service_lock);
+}
+
+int adf_service_unregister(struct service_hndl *service)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
+		if (service->init_status[i] || service->start_status[i]) {
+			pr_err("QAT: Could not remove active service\n");
+			return -EFAULT;
+		}
+	}
+	adf_service_remove(service);
+	return 0;
+}
+
+/**
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_init(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+	if (!hw_data) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to init device - hw_data not set\n");
+		return -EFAULT;
+	}
+
+	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
+		return -EFAULT;
+	}
+
+	if (adf_init_etr_data(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
+		return -EFAULT;
+	}
+
+	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
+		return -EFAULT;
+	}
+
+	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+		return -EFAULT;
+	}
+
+	hw_data->enable_ints(accel_dev);
+
+	if (adf_ae_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to initialise Acceleration Engine\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+	if (adf_ae_fw_load(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to load acceleration FW\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+	if (hw_data->alloc_irq(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+	/*
+	 * Subservice initialisation is divided into two stages: init and start.
+	 * This is to facilitate any ordering dependencies between services
+	 * prior to starting any of the accelerators.
+	 */
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to initialise service %s\n",
+				service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, service->init_status);
+	}
+
+	hw_data->enable_error_correction(accel_dev);
+	hw_data->enable_vf2pf_comms(accel_dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_init);
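+
+/*
+ * Illustrative bring-up sequence (a sketch mirroring
+ * adf_ctl_ioctl_dev_start() in adf_ctl_drv.c; on failure the caller is
+ * expected to run adf_dev_stop() and adf_dev_shutdown()):
+ *
+ *	ret = adf_dev_init(accel_dev);
+ *	if (!ret)
+ *		ret = adf_dev_start(accel_dev);
+ */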
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+	if (adf_ae_start(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+	if (hw_data->send_admin_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+		return -EFAULT;
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to start service %s\n",
+				service->name);
+			return -EFAULT;
+		}
+		set_bit(accel_dev->accel_id, service->start_status);
+	}
+
+	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+	set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+	if (!list_empty(&accel_dev->crypto_list) &&
+	    (qat_algs_register() || qat_asym_algs_register())) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to register crypto algs\n");
+		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+		return -EFAULT;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+	bool wait = false;
+	int ret;
+
+	if (!adf_dev_started(accel_dev) &&
+	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+		return;
+
+	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+	if (!list_empty(&accel_dev->crypto_list)) {
+		qat_algs_unregister();
+		qat_asym_algs_unregister();
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!test_bit(accel_dev->accel_id, service->start_status))
+			continue;
+		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+		if (!ret) {
+			clear_bit(accel_dev->accel_id, service->start_status);
+		} else if (ret == -EAGAIN) {
+			wait = true;
+			clear_bit(accel_dev->accel_id, service->start_status);
+		}
+	}
+
+	if (wait)
+		msleep(100);
+
+	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
+		if (adf_ae_stop(accel_dev))
+			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
+		else
+			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+	}
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+/**
+ * adf_dev_shutdown() - shutdown acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	if (!hw_data) {
+		dev_err(&GET_DEV(accel_dev),
+			"QAT: Failed to shutdown device - hw_data not set\n");
+		return;
+	}
+
+	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+		adf_ae_fw_release(accel_dev);
+		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+	}
+
+	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+		if (adf_ae_shutdown(accel_dev))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to shutdown Accel Engine\n");
+		else
+			clear_bit(ADF_STATUS_AE_INITIALISED,
+				  &accel_dev->status);
+	}
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (!test_bit(accel_dev->accel_id, service->init_status))
+			continue;
+		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to shutdown service %s\n",
+				service->name);
+		else
+			clear_bit(accel_dev->accel_id, service->init_status);
+	}
+
+	hw_data->disable_iov(accel_dev);
+
+	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+		hw_data->free_irq(accel_dev);
+		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+	}
+
+	/* Delete configuration only if not restarting */
+	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+		adf_cfg_del_all(accel_dev);
+
+	if (hw_data->exit_arb)
+		hw_data->exit_arb(accel_dev);
+
+	if (hw_data->exit_admin_comms)
+		hw_data->exit_admin_comms(accel_dev);
+
+	adf_cleanup_etr_data(accel_dev);
+	adf_dev_restore(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_dev_shutdown);
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to restart service %s.\n",
+				service->name);
+	}
+	return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+	struct service_hndl *service;
+	struct list_head *list_itr;
+
+	list_for_each(list_itr, &service_table) {
+		service = list_entry(list_itr, struct service_hndl, list);
+		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to restart service %s.\n",
+				service->name);
+	}
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_isr.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_isr.c
new file mode 100644
index 0000000..06d4901
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_isr.c
@@ -0,0 +1,348 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msix_num_entries = 1;
+
+	/* If SR-IOV is disabled, add entries for each bank */
+	if (!accel_dev->pf.vf_info) {
+		int i;
+
+		msix_num_entries += hw_data->num_banks;
+		for (i = 0; i < msix_num_entries; i++)
+			pci_dev_info->msix_entries.entries[i].entry = i;
+	} else {
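+		/*
+		 * SR-IOV enabled: bundle interrupts are handled by the
+		 * VFs, so only the AE cluster vector (index num_banks)
+		 * is used.
+		 */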
+		pci_dev_info->msix_entries.entries[0].entry =
+			hw_data->num_banks;
+	}
+
+	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
+				  pci_dev_info->msix_entries.entries,
+				  msix_num_entries)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+	pci_disable_msix(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+	struct adf_etr_bank_data *bank = bank_ptr;
+
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
+	tasklet_hi_schedule(&bank->resp_handler);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+	struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+	if (accel_dev->pf.vf_info) {
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+		void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+		u32 vf_mask;
+
+		/* Get the interrupt sources triggered by VFs */
+		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
+			    0x0000FFFF) << 16) |
+			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
+			    0x01FFFE00) >> 9);
+
+		if (vf_mask) {
+			struct adf_accel_vf_info *vf_info;
+			bool irq_handled = false;
+			int i;
+
+			/* Disable VF2PF interrupts for VFs with pending ints */
+			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+			/*
+			 * Schedule tasklets to handle VF2PF interrupt BHs
+			 * unless the VF is malicious and is attempting to
+			 * flood the host OS with VF2PF interrupts.
+			 */
+			for_each_set_bit(i, (const unsigned long *)&vf_mask,
+					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
+				vf_info = accel_dev->pf.vf_info + i;
+
+				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+					dev_info(&GET_DEV(accel_dev),
+						 "Too many ints from VF%d\n",
+						  vf_info->vf_nr + 1);
+					continue;
+				}
+
+				/* Tasklet will re-enable ints from this VF */
+				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+				irq_handled = true;
+			}
+
+			if (irq_handled)
+				return IRQ_HANDLED;
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+
+	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+		accel_dev->accel_id);
+
+	return IRQ_NONE;
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int ret, i = 0;
+	char *name;
+
+	/* Request msix irq for all banks unless SR-IOV enabled */
+	if (!accel_dev->pf.vf_info) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			struct adf_etr_bank_data *bank = &etr_data->banks[i];
+			unsigned int cpu, cpus = num_online_cpus();
+
+			name = *(pci_dev_info->msix_entries.names + i);
+			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+				 "qat%d-bundle%d", accel_dev->accel_id, i);
+			ret = request_irq(msixe[i].vector,
+					  adf_msix_isr_bundle, 0, name, bank);
+			if (ret) {
+				dev_err(&GET_DEV(accel_dev),
+					"failed to enable irq %d for %s\n",
+					msixe[i].vector, name);
+				return ret;
+			}
+
+			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+			       i) % cpus;
+			irq_set_affinity_hint(msixe[i].vector,
+					      get_cpu_mask(cpu));
+		}
+	}
+
+	/* Request msix irq for AE */
+	name = *(pci_dev_info->msix_entries.names + i);
+	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+		 "qat%d-ae-cluster", accel_dev->accel_id);
+	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev),
+			"failed to enable irq %d, for %s\n",
+			msixe[i].vector, name);
+		return ret;
+	}
+	return ret;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	int i = 0;
+
+	if (pci_dev_info->msix_entries.num_entries > 1) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			irq_set_affinity_hint(msixe[i].vector, NULL);
+			free_irq(msixe[i].vector, &etr_data->banks[i]);
+		}
+	}
+	irq_set_affinity_hint(msixe[i].vector, NULL);
+	free_irq(msixe[i].vector, accel_dev);
+}
+
+static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+	int i;
+	char **names;
+	struct msix_entry *entries;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msix_num_entries = 1;
+
+	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+	if (!accel_dev->pf.vf_info)
+		msix_num_entries += hw_data->num_banks;
+
+	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
+			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+	if (!entries)
+		return -ENOMEM;
+
+	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
+	if (!names) {
+		kfree(entries);
+		return -ENOMEM;
+	}
+	for (i = 0; i < msix_num_entries; i++) {
+		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+		if (!(*(names + i)))
+			goto err;
+	}
+	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
+	accel_dev->accel_pci_dev.msix_entries.entries = entries;
+	accel_dev->accel_pci_dev.msix_entries.names = names;
+	return 0;
+err:
+	for (i = 0; i < msix_num_entries; i++)
+		kfree(*(names + i));
+	kfree(entries);
+	kfree(names);
+	return -ENOMEM;
+}
+
+static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+	char **names = accel_dev->accel_pci_dev.msix_entries.names;
+	int i;
+
+	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
+	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
+		kfree(*(names + i));
+	kfree(names);
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++)
+		tasklet_init(&priv_data->banks[i].resp_handler,
+			     adf_response_handler,
+			     (unsigned long)&priv_data->banks[i]);
+	return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int i;
+
+	for (i = 0; i < hw_data->num_banks; i++) {
+		tasklet_disable(&priv_data->banks[i].resp_handler);
+		tasklet_kill(&priv_data->banks[i].resp_handler);
+	}
+}
+
+/**
+ * adf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device.
+ */
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
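+	/* Tear down in the reverse order of adf_isr_resource_alloc() */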
+	adf_free_irqs(accel_dev);
+	adf_cleanup_bh(accel_dev);
+	adf_disable_msix(&accel_dev->accel_pci_dev);
+	adf_isr_free_msix_entry_table(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_free);
+
+/**
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+	int ret;
+
+	ret = adf_isr_alloc_msix_entry_table(accel_dev);
+	if (ret)
+		return ret;
+	if (adf_enable_msix(accel_dev))
+		goto err_out;
+
+	if (adf_setup_bh(accel_dev))
+		goto err_out;
+
+	if (adf_request_irqs(accel_dev))
+		goto err_out;
+
+	return 0;
+err_out:
+	adf_isr_resource_free(accel_dev);
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644
index 0000000..b3875fd
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -0,0 +1,415 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET	0x3A000
+#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
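+/*
+ * Ownership handshake for the single shared PF2VF CSR: claim it by
+ * writing the local IN_USE pattern, re-read after a delay to detect a
+ * collision with the remote side, interrupt the remote by setting its
+ * interrupt bit alongside the message, then poll for the remote to
+ * clear that bit (ACK) before relinquishing the CSR.
+ */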
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+	u32 val, pf2vf_offset, count = 0;
+	u32 local_in_use_mask, local_in_use_pattern;
+	u32 remote_in_use_mask, remote_in_use_pattern;
+	struct mutex *lock;	/* lock preventing concurrent access of CSR */
+	u32 int_bit;
+	int ret = 0;
+
+	if (accel_dev->is_vf) {
+		pf2vf_offset = hw_data->get_pf2vf_offset(0);
+		lock = &accel_dev->vf.vf2pf_lock;
+		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		int_bit = ADF_VF2PF_INT;
+	} else {
+		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		int_bit = ADF_PF2VF_INT;
+	}
+
+	mutex_lock(lock);
+
+	/* Check if PF2VF CSR is in use by remote function */
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote function\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Attempt to get ownership of PF2VF CSR */
+	msg &= ~local_in_use_mask;
+	msg |= local_in_use_pattern;
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+	/* Wait in case remote func also attempting to get ownership */
+	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & local_in_use_mask) != local_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote - collision detected\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
+	 * remain in the PF2VF CSR for all writes including ACK from remote
+	 * until this local function relinquishes the CSR.  Send the message
+	 * by interrupting the remote.
+	 */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+	/* Wait for confirmation from remote func it received the message */
+	do {
+		msleep(ADF_IOV_MSG_ACK_DELAY);
+		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+	if (val & int_bit) {
+		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+		val &= ~int_bit;
+		ret = -EIO;
+	}
+
+	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+	mutex_unlock(lock);
+	return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev:  Pointer to acceleration device.
+ * @msg:	Message to send
+ * @vf_nr:	VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	u32 count = 0;
+	int ret;
+
+	do {
+		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+		if (ret)
+			msleep(ADF_IOV_MSG_RETRY_DELAY);
+	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
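+/*
+ * Timing sketch (editorial illustration, derived from the constants in
+ * adf_pf2vf_msg.h): one __adf_iov_putmsg() attempt can spend up to
+ *
+ *	10 ms (collision detect) + 100 * 2 ms (ACK polling) = 210 ms,
+ *
+ * and adf_iov_putmsg() makes up to four attempts with 5 ms pauses, so an
+ * unresponsive peer is given roughly 860 ms before -EIO or -EBUSY is
+ * returned.  ADF_IOV_MSG_RESP_TIMEOUT (210 ms) matches a single attempt.
+ */
+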
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int bar_id = hw_data->get_misc_bar_id(hw_data);
+	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+	/* Read message from the VF */
+	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+	/* To ACK, clear the VF2PFINT bit */
+	msg &= ~ADF_VF2PF_INT;
+	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) VF2PF messages */
+		goto err;
+
+	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+		{
+		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Compatibility Version Request from VF%d vers=%u\n",
+			vf_nr + 1, vf_compat_ver);
+
+		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) incompatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) compat with PF (vers %d) unkn.\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else {
+			dev_dbg(&GET_DEV(accel_dev),
+				"VF (vers %d) compatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		}
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Legacy VersionRequest received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+		resp |= ADF_PF2VF_VF_COMPATIBLE <<
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		/* Set legacy major and minor version num */
+		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+			1 << ADF_PF2VF_MINORVERSION_SHIFT;
+		break;
+	case ADF_VF2PF_MSGTYPE_INIT:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Init message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = true;
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Shutdown message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = false;
+		}
+		break;
+	default:
+		goto err;
+	}
+
+	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+	/* re-enable interrupt on PF from this VF */
+	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+	return;
+err:
+	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x);\n",
+		vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_vf_info *vf;
+	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send restarting msg to VF%d\n", i);
+	}
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msg = 0;
+	int ret;
+
+	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+	/* Send request from VF to PF */
+	ret = adf_iov_putmsg(accel_dev, msg, 0);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Compatibility Version Request.\n");
+		return ret;
+	}
+
+	/* Wait for response */
+	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+					 timeout)) {
+		dev_err(&GET_DEV(accel_dev),
+			"IOV request/response message timeout expired\n");
+		return -EIO;
+	}
+
+	/* Response from PF received, check compatibility */
+	switch (accel_dev->vf.compatible) {
+	case ADF_PF2VF_VF_COMPATIBLE:
+		break;
+	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+		/* VF is newer than PF and decides whether it is compatible */
+		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+			break;
+		/* fall through */
+	case ADF_PF2VF_VF_INCOMPATIBLE:
+		dev_err(&GET_DEV(accel_dev),
+			"PF (vers %d) and VF (vers %d) are not compatible\n",
+			accel_dev->vf.pf_version,
+			ADF_PFVF_COMPATIBILITY_VERSION);
+		return -EINVAL;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"Invalid response from PF; assume not compatible\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
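+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): a VF driver
+ * would typically invoke adf_enable_vf2pf_comms() from its init path and
+ * abort bring-up when the PF/VF compatibility handshake fails, e.g.:
+ *
+ *	ret = adf_enable_vf2pf_comms(accel_dev);
+ *	if (ret)
+ *		dev_err(&GET_DEV(accel_dev),
+ *			"PF/VF comms failed (%d), aborting init\n", ret);
+ */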
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 0000000..5acd531
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF.  The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionality is split into two parts:
+ * The bottom half is for PF->VF messages. In particular, when the first
+ * bit of this register (bit 0) is set, an interrupt is triggered in the
+ * respective VF.
+ * The top half is for VF->PF messages. In particular, when the first bit
+ * of this half of the register (bit 16) is set, an interrupt is triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism that prevents concurrent
+ * use of the PF2VF register by both the PF and the VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver used a set of messages that this driver
+ * does not support; those messages have a Msg Origin of 0 and are ignored.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION		0x1	/* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT				BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM		BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK			0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT			2
+#define ADF_PF2VF_MSGTYPE_RESTARTING		0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP		0x02
+#define ADF_PF2VF_IN_USE_BY_PF			0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK		0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK	0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT	6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK	0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT	14
+#define ADF_PF2VF_MINORVERSION_SHIFT		6
+#define ADF_PF2VF_MAJORVERSION_SHIFT		10
+#define ADF_PF2VF_VF_COMPATIBLE			1
+#define ADF_PF2VF_VF_INCOMPATIBLE		2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN		3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF			0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK		0x0000FFFE
+#define ADF_VF2PF_INT				BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM		BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK			0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT			18
+#define ADF_VF2PF_MSGTYPE_INIT			0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN		0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ		0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ	0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT		22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY	10
+#define ADF_IOV_MSG_ACK_DELAY			2
+#define ADF_IOV_MSG_ACK_MAX_RETRY		100
+#define ADF_IOV_MSG_RETRY_DELAY			5
+#define ADF_IOV_MSG_MAX_RETRIES			3
+#define ADF_IOV_MSG_RESP_TIMEOUT	(ADF_IOV_MSG_ACK_DELAY * \
+					 ADF_IOV_MSG_ACK_MAX_RETRY + \
+					 ADF_IOV_MSG_COLLISION_DETECT_DELAY)
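+
+/*
+ * Worked example (illustrative only): a VF2PF Compatibility Version
+ * Request for compat version 1 is encoded as
+ *
+ *	ADF_VF2PF_MSGORIGIN_SYSTEM               BIT(17)   = 0x00020000
+ *	ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << 18   0x6 << 18 = 0x00180000
+ *	ADF_PFVF_COMPATIBILITY_VERSION << 22     0x1 << 22 = 0x00400000
+ *	                                         msg       = 0x005A0000
+ *
+ * which is exactly the word adf_vf2pf_request_version() in adf_pf2vf_msg.c
+ * builds before calling adf_iov_putmsg().
+ */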
+#endif /* ADF_PF2VF_MSG_H */
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_sriov.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644
index 0000000..b36d865
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -0,0 +1,306 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET	(0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS	96
+
+#define ME2FUNCTION_MAP_B_OFFSET	(0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS	12
+
+#define ME2FUNCTION_MAP_REG_SIZE	4
+#define ME2FUNCTION_MAP_VALID		BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)		\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)		\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+	struct work_struct pf2vf_resp_work;
+	struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+	struct adf_pf2vf_resp *pf2vf_resp =
+		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+	adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+	kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+	struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+	struct adf_pf2vf_resp *pf2vf_resp;
+
+	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+	if (!pf2vf_resp)
+		return;
+
+	pf2vf_resp->vf_info = vf_info;
+	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
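+/*
+ * Note (editorial): the VF2PF interrupt is bounced from the tasklet above
+ * into a workqueue because the handler it eventually runs,
+ * adf_vf2pf_req_hndl() -> adf_iov_putmsg(), takes a mutex and calls
+ * msleep(), both of which require process context.
+ */
+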
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	struct adf_accel_vf_info *vf_info;
+	int i;
+	u32 reg;
+
+	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+	     i++, vf_info++) {
+		/* This ptr will be populated when VFs are created */
+		vf_info->accel_dev = accel_dev;
+		vf_info->vf_nr = i;
+
+		tasklet_init(&vf_info->vf2pf_bh_tasklet,
+			     (void *)adf_vf2pf_bh_handler,
+			     (unsigned long)vf_info);
+		mutex_init(&vf_info->pf2vf_lock);
+		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+				     DEFAULT_RATELIMIT_INTERVAL,
+				     DEFAULT_RATELIMIT_BURST);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	/* Enable VF to PF interrupts for all VFs */
+	adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+	/*
+	 * Due to the hardware design, when SR-IOV and the ring arbiter
+	 * are enabled all the VFs supported in hardware must be enabled in
+	 * order for all the hardware resources (i.e. bundles) to be usable.
+	 * When SR-IOV is enabled, each of the VFs will own one bundle.
+	 */
+	return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev:  Pointer to accel device.
+ *
+ * Function disables SRIOV for the accel device.
+ *
+ * Return: void
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+	struct adf_accel_vf_info *vf;
+	u32 reg;
+	int i;
+
+	if (!accel_dev->pf.vf_info)
+		return;
+
+	adf_pf2vf_notify_restarting(accel_dev);
+
+	pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+	/* Disable VF to PF interrupts */
+	adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+		tasklet_disable(&vf->vf2pf_bh_tasklet);
+		tasklet_kill(&vf->vf2pf_bh_tasklet);
+		mutex_destroy(&vf->pf2vf_lock);
+	}
+
+	kfree(accel_dev->pf.vf_info);
+	accel_dev->pf.vf_info = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev:   Pointer to PCI device.
+ * @numvfs: Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	unsigned long val;
+	int ret;
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -EFAULT;
+	}
+
+	if (!iommu_present(&pci_bus_type))
+		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
+
+	if (accel_dev->pf.vf_info) {
+		dev_info(&pdev->dev, "Already enabled for this device\n");
+		return -EINVAL;
+	}
+
+	if (adf_dev_started(accel_dev)) {
+		if (adf_devmgr_in_reset(accel_dev) ||
+		    adf_dev_in_use(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev), "Device busy\n");
+			return -EBUSY;
+		}
+
+		adf_dev_stop(accel_dev);
+		adf_dev_shutdown(accel_dev);
+	}
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		return -EFAULT;
+	val = 0;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		return -EFAULT;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+	/* Allocate memory for VF info structs */
+	accel_dev->pf.vf_info = kcalloc(totalvfs,
+					sizeof(struct adf_accel_vf_info),
+					GFP_KERNEL);
+	if (!accel_dev->pf.vf_info)
+		return -ENOMEM;
+
+	if (adf_dev_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+			accel_dev->accel_id);
+		return -EFAULT;
+	}
+
+	if (adf_dev_start(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+			accel_dev->accel_id);
+		return -EFAULT;
+	}
+
+	ret = adf_enable_sriov(accel_dev);
+	if (ret)
+		return ret;
+
+	return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport.c
new file mode 100644
index 0000000..57d2622
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport.c
@@ -0,0 +1,566 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+	uint32_t div = data >> shift;
+	uint32_t mult = div << shift;
+
+	return data - mult;
+}
+
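+/*
+ * Example (illustrative): adf_modulo(x, shift) computes x % (1 << shift)
+ * without a division, e.g. adf_modulo(70, 6) == 70 - ((70 >> 6) << 6) == 6.
+ * The transport code uses it to wrap ring head/tail byte offsets, where
+ * ADF_RING_SIZE_MODULO(ring_size) is the log2 of the ring size in bytes.
+ */
+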
+static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+{
+	if (((size - 1) & addr) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+{
+	int i = ADF_MIN_RING_SIZE;
+
+	for (; i <= ADF_MAX_RING_SIZE; i++)
+		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+			return i;
+
+	return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock(&bank->lock);
+	if (bank->ring_mask & (1 << ring)) {
+		spin_unlock(&bank->lock);
+		return -EFAULT;
+	}
+	bank->ring_mask |= (1 << ring);
+	spin_unlock(&bank->lock);
+	return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock(&bank->lock);
+	bank->ring_mask &= ~(1 << ring);
+	spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock_bh(&bank->lock);
+	bank->irq_mask |= (1 << ring);
+	spin_unlock_bh(&bank->lock);
+	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+	WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
+			      bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+	spin_lock_bh(&bank->lock);
+	bank->irq_mask &= ~(1 << ring);
+	spin_unlock_bh(&bank->lock);
+	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+{
+	if (atomic_add_return(1, ring->inflights) >
+	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+		atomic_dec(ring->inflights);
+		return -EAGAIN;
+	}
+	spin_lock_bh(&ring->lock);
+	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
+	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+	ring->tail = adf_modulo(ring->tail +
+				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+				ADF_RING_SIZE_MODULO(ring->ring_size));
+	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
+			    ring->ring_number, ring->tail);
+	spin_unlock_bh(&ring->lock);
+	return 0;
+}
+
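+/*
+ * Caller-side sketch (hypothetical, assuming a prepared request "req" and
+ * a retry budget): adf_send_message() returns -EAGAIN when the ring is
+ * full, so callers are expected to back off and retry, e.g.:
+ *
+ *	do {
+ *		ret = adf_send_message(tx_ring, (uint32_t *)req);
+ *		if (ret == -EAGAIN)
+ *			cond_resched();
+ *	} while (ret == -EAGAIN && retries--);
+ */
+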
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+	uint32_t msg_counter = 0;
+	uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+
+	while (*msg != ADF_RING_EMPTY_SIG) {
+		ring->callback((uint32_t *)msg);
+		atomic_dec(ring->inflights);
+		*msg = ADF_RING_EMPTY_SIG;
+		ring->head = adf_modulo(ring->head +
+					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+					ADF_RING_SIZE_MODULO(ring->ring_size));
+		msg_counter++;
+		msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+	}
+	if (msg_counter > 0)
+		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
+				    ring->bank->bank_number,
+				    ring->ring_number, ring->head);
+	return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+			      ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_config =
+			BUILD_RESP_RING_CONFIG(ring->ring_size,
+					       ADF_RING_NEAR_WATERMARK_512,
+					       ADF_RING_NEAR_WATERMARK_0);
+
+	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+			      ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+	struct adf_etr_bank_data *bank = ring->bank;
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	uint64_t ring_base;
+	uint32_t ring_size_bytes =
+			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+					     ring_size_bytes, &ring->dma_addr,
+					     GFP_KERNEL);
+	if (!ring->base_addr)
+		return -ENOMEM;
+
+	memset(ring->base_addr, 0x7F, ring_size_bytes);
+	/* The base_addr has to be aligned to the size of the buffer */
+	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+				  ring->base_addr, ring->dma_addr);
+		return -EFAULT;
+	}
+
+	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+		adf_configure_tx_ring(ring);
+	else
+		adf_configure_rx_ring(ring);
+
+	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
+	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
+			    ring->ring_number, ring_base);
+	spin_lock_init(&ring->lock);
+	return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+	uint32_t ring_size_bytes =
+			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+	if (ring->base_addr) {
+		memset(ring->base_addr, 0x7F, ring_size_bytes);
+		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+				  ring_size_bytes, ring->base_addr,
+				  ring->dma_addr);
+	}
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs,
+		    uint32_t msg_size, const char *ring_name,
+		    adf_callback_fn callback, int poll_mode,
+		    struct adf_etr_ring_data **ring_ptr)
+{
+	struct adf_etr_data *transport_data = accel_dev->transport;
+	struct adf_etr_bank_data *bank;
+	struct adf_etr_ring_data *ring;
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+	uint32_t ring_num;
+	int ret;
+
+	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
+		return -EFAULT;
+	}
+	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
+		return -EFAULT;
+	}
+	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+		dev_err(&GET_DEV(accel_dev),
+			"Invalid ring size for given msg size\n");
+		return -EFAULT;
+	}
+	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
+			section, ring_name);
+		return -EFAULT;
+	}
+	if (kstrtouint(val, 10, &ring_num)) {
+		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
+		return -EFAULT;
+	}
+	if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+		return -EFAULT;
+	}
+
+	bank = &transport_data->banks[bank_num];
+	if (adf_reserve_ring(bank, ring_num)) {
+		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
+			ring_num, ring_name);
+		return -EFAULT;
+	}
+	ring = &bank->rings[ring_num];
+	ring->ring_number = ring_num;
+	ring->bank = bank;
+	ring->callback = callback;
+	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+	ring->head = 0;
+	ring->tail = 0;
+	atomic_set(ring->inflights, 0);
+	ret = adf_init_ring(ring);
+	if (ret)
+		goto err;
+
+	/* Enable HW arbitration for the given ring */
+	adf_update_ring_arb(ring);
+
+	if (adf_ring_debugfs_add(ring, ring_name)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Couldn't add ring debugfs entry\n");
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/* Enable interrupts if needed */
+	if (callback && (!poll_mode))
+		adf_enable_ring_irq(bank, ring->ring_number);
+	*ring_ptr = ring;
+	return 0;
+err:
+	adf_cleanup_ring(ring);
+	adf_unreserve_ring(bank, ring_num);
+	adf_update_ring_arb(ring);
+	return ret;
+}
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+	struct adf_etr_bank_data *bank = ring->bank;
+
+	/* Disable interrupts for the given ring */
+	adf_disable_ring_irq(bank, ring->ring_number);
+
+	/* Clear ring configuration and base CSRs */
+	WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
+			      ring->ring_number, 0);
+	WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
+			    ring->ring_number, 0);
+	adf_ring_debugfs_rm(ring);
+	adf_unreserve_ring(bank, ring->ring_number);
+	/* Disable HW arbitration for the given ring */
+	adf_update_ring_arb(ring);
+	adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+	uint32_t empty_rings, i;
+
+	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+	empty_rings = ~empty_rings & bank->irq_mask;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
+		if (empty_rings & (1 << i))
+			adf_handle_response(&bank->rings[i]);
+	}
+}
+
+void adf_response_handler(uintptr_t bank_addr)
+{
+	struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+	/* Handle all the responses and reenable IRQs */
+	adf_ring_response_handler(bank);
+	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+				   bank->irq_mask);
+}
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+				  const char *section, const char *format,
+				  uint32_t key, uint32_t *value)
+{
+	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+		return -EFAULT;
+
+	if (kstrtouint(val_buf, 10, value))
+		return -EFAULT;
+	return 0;
+}
+
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+				  const char *section,
+				  uint32_t bank_num_in_accel)
+{
+	if (adf_get_cfg_int(bank->accel_dev, section,
+			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+			    bank_num_in_accel, &bank->irq_coalesc_timer))
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+			 struct adf_etr_bank_data *bank,
+			 uint32_t bank_num, void __iomem *csr_addr)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_etr_ring_data *ring;
+	struct adf_etr_ring_data *tx_ring;
+	uint32_t i, coalesc_enabled = 0;
+
+	memset(bank, 0, sizeof(*bank));
+	bank->bank_number = bank_num;
+	bank->csr_addr = csr_addr;
+	bank->accel_dev = accel_dev;
+	spin_lock_init(&bank->lock);
+
+	/*
+	 * Always enable IRQ coalescing: this allows the optimised flag
+	 * and coalescing register to be used.  If coalescing is disabled
+	 * in the config file, just use the minimum time value.
+	 */
+	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+			     &coalesc_enabled) == 0) && coalesc_enabled)
+		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
+	else
+		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i)) {
+			ring->inflights =
+				kzalloc_node(sizeof(atomic_t),
+					     GFP_KERNEL,
+					     dev_to_node(&GET_DEV(accel_dev)));
+			if (!ring->inflights)
+				goto err;
+		} else {
+			if (i < hw_data->tx_rx_gap) {
+				dev_err(&GET_DEV(accel_dev),
+					"Invalid tx rings mask config\n");
+				goto err;
+			}
+			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+			ring->inflights = tx_ring->inflights;
+		}
+	}
+	if (adf_bank_debugfs_add(bank)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to add bank debugfs entry\n");
+		goto err;
+	}
+
+	WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
+	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+	return 0;
+err:
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		ring = &bank->rings[i];
+		if (hw_data->tx_rings_mask & (1 << i))
+			kfree(ring->inflights);
+	}
+	return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr_addr;
+	uint32_t size;
+	uint32_t num_banks = 0;
+	int i, ret;
+
+	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+				dev_to_node(&GET_DEV(accel_dev)));
+	if (!etr_data)
+		return -ENOMEM;
+
+	num_banks = GET_MAX_BANKS(accel_dev);
+	size = num_banks * sizeof(struct adf_etr_bank_data);
+	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+				       dev_to_node(&GET_DEV(accel_dev)));
+	if (!etr_data->banks) {
+		ret = -ENOMEM;
+		goto err_bank;
+	}
+
+	accel_dev->transport = etr_data;
+	i = hw_data->get_etr_bar_id(hw_data);
+	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+	/* accel_dev->debugfs_dir should always be non-NULL here */
+	etr_data->debug = debugfs_create_dir("transport",
+					     accel_dev->debugfs_dir);
+	if (!etr_data->debug) {
+		dev_err(&GET_DEV(accel_dev),
+			"Unable to create transport debugfs entry\n");
+		ret = -ENOENT;
+		goto err_bank_debug;
+	}
+
+	for (i = 0; i < num_banks; i++) {
+		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+				    csr_addr);
+		if (ret)
+			goto err_bank_all;
+	}
+
+	return 0;
+
+err_bank_all:
+	debugfs_remove(etr_data->debug);
+err_bank_debug:
+	kfree(etr_data->banks);
+err_bank:
+	kfree(etr_data);
+	accel_dev->transport = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+	uint32_t i;
+
+	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+		struct adf_accel_dev *accel_dev = bank->accel_dev;
+		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+		struct adf_etr_ring_data *ring = &bank->rings[i];
+
+		if (bank->ring_mask & (1 << i))
+			adf_cleanup_ring(ring);
+
+		if (hw_data->tx_rings_mask & (1 << i))
+			kfree(ring->inflights);
+	}
+	adf_bank_debugfs_rm(bank);
+	memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+	uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+	for (i = 0; i < num_banks; i++)
+		cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *etr_data = accel_dev->transport;
+
+	if (etr_data) {
+		adf_cleanup_etr_handles(accel_dev);
+		debugfs_remove(etr_data->debug);
+		kfree(etr_data->banks);
+		kfree(etr_data);
+		accel_dev->transport = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644
index 0000000..386485b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport.h
@@ -0,0 +1,63 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+		    uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+		    const char *ring_name, adf_callback_fn callback,
+		    int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644
index 0000000..80e02a2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -0,0 +1,169 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_FLAG	0x170
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE	0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) ((SIZE) << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) ((SIZE) >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << ((SIZE) - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << ((SIZE) - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+	((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+		ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+				SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+	((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
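+/*
+ * Worked example (editorial): a 16KB ring (ADF_RING_SIZE_16K == 0x08) of
+ * 64-byte messages (ADF_MSG_SIZE_64 == 0x02) gives
+ *	ring bytes    = (1 << (8 - 1)) << 7        = 16384 (256 slots),
+ *	max inflights = (((1 << 7) << 3) >> 2) - 1 = 255,
+ * i.e. one slot is left unused, presumably so that a full ring can be
+ * told apart from an empty one.
+ */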
+#define BUILD_RING_CONFIG(size)	\
+	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+	((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM)	\
+	| (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+	| size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+	((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+	uint32_t l_base = 0, u_base = 0; \
+	l_base = (uint32_t)(value & 0xFFFFFFFF); \
+	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_LBASE + (ring << 2), l_base);	\
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_UBASE + (ring << 2), u_base);	\
+} while (0)
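+/*
+ * e.g. (illustrative) a 64-bit value of 0x123456789000ULL for ring 3 in
+ * bank 0 is written as l_base = 0x56789000 to LBASE (0x040 + 0xC) and
+ * u_base = 0x00001234 to UBASE (0x080 + 0xC).
+ */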
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+		ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+			ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);	\
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+	ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_COL_CTL, \
+			ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+			ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_debug.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644
index 0000000..52340b9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -0,0 +1,294 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	mutex_lock(&ring_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+
+	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+		return NULL;
+
+	return ring->base_addr +
+		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_ring_data *ring = sfile->private;
+	struct adf_etr_bank_data *bank = ring->bank;
+	void __iomem *csr = ring->bank->csr_addr;
+
+	if (v == SEQ_START_TOKEN) {
+		int head, tail, empty;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_puts(sfile, "------- Ring configuration -------\n");
+		seq_printf(sfile, "ring name: %s\n",
+			   ring->ring_debug->ring_name);
+		seq_printf(sfile, "ring num %d, bank num %d\n",
+			   ring->ring_number, ring->bank->bank_number);
+		seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+			   head, tail, (empty & 1 << ring->ring_number)
+			   >> ring->ring_number);
+		seq_printf(sfile, "ring size %d, msg size %d\n",
+			   ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+			   ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+		seq_puts(sfile, "----------- Ring data ------------\n");
+		return 0;
+	}
+	seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+		     v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
+	return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+	.start = adf_ring_start,
+	.next = adf_ring_next,
+	.stop = adf_ring_stop,
+	.show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_ring_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+	.open = adf_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+	struct adf_etr_ring_debug_entry *ring_debug;
+	char entry_name[8];
+
+	ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+	if (!ring_debug)
+		return -ENOMEM;
+
+	strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+	snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+		 ring->ring_number);
+
+	ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+						ring->bank->bank_debug_dir,
+						ring, &adf_ring_debug_fops);
+	if (!ring_debug->debug) {
+		pr_err("QAT: Failed to create ring debug entry.\n");
+		kfree(ring_debug);
+		return -EFAULT;
+	}
+	ring->ring_debug = ring_debug;
+	return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+	if (ring->ring_debug) {
+		debugfs_remove(ring->ring_debug->debug);
+		kfree(ring->ring_debug);
+		ring->ring_debug = NULL;
+	}
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+	mutex_lock(&bank_read_lock);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+	if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+		return NULL;
+
+	return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+	struct adf_etr_bank_data *bank = sfile->private;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(sfile, "------- Bank %d configuration -------\n",
+			   bank->bank_number);
+	} else {
+		int ring_id = *((int *)v) - 1;
+		struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+		void __iomem *csr = bank->csr_addr;
+		int head, tail, empty;
+
+		if (!(bank->ring_mask & 1 << ring_id))
+			return 0;
+
+		head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+					  ring->ring_number);
+		tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+					  ring->ring_number);
+		empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+		seq_printf(sfile,
+			   "ring num %02d, head %04x, tail %04x, empty: %d\n",
+			   ring->ring_number, head, tail,
+			   (empty & 1 << ring->ring_number) >>
+			   ring->ring_number);
+	}
+	return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+	mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+	.start = adf_bank_start,
+	.next = adf_bank_next,
+	.stop = adf_bank_stop,
+	.show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+	int ret = seq_open(file, &adf_bank_sops);
+
+	if (!ret) {
+		struct seq_file *seq_f = file->private_data;
+
+		seq_f->private = inode->i_private;
+	}
+	return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+	.open = adf_bank_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release
+};
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	struct adf_accel_dev *accel_dev = bank->accel_dev;
+	struct dentry *parent = accel_dev->transport->debug;
+	char name[8];
+
+	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+	bank->bank_debug_dir = debugfs_create_dir(name, parent);
+	if (!bank->bank_debug_dir) {
+		pr_err("QAT: Failed to create bank debug dir.\n");
+		return -EFAULT;
+	}
+
+	bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+						   bank->bank_debug_dir, bank,
+						   &adf_bank_debug_fops);
+	if (!bank->bank_debug_cfg) {
+		pr_err("QAT: Failed to create bank debug entry.\n");
+		debugfs_remove(bank->bank_debug_dir);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+	debugfs_remove(bank->bank_debug_cfg);
+	debugfs_remove(bank->bank_debug_dir);
+}
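
The ring iterator above maps *pos directly to a fixed-size message slot and
stops once pos reaches the ring capacity (ring bytes divided by message
bytes). A standalone sketch of that arithmetic, with illustrative sizes in
place of the ADF_SIZE_TO_RING_SIZE_IN_BYTES()/ADF_MSG_SIZE_TO_BYTES()
conversions:

#include <stddef.h>
#include <stdio.h>

static void *ring_slot(char *base, size_t ring_bytes, size_t msg_bytes,
		       size_t pos)
{
	size_t nmsgs = ring_bytes / msg_bytes;	/* slots in the ring */

	if (pos >= nmsgs)
		return NULL;			/* iterator exhausted */
	return base + msg_bytes * pos;		/* start of message 'pos' */
}

int main(void)
{
	char ring[1024];	/* 1 KiB ring holding 64-byte messages */
	size_t pos;

	for (pos = 0; ring_slot(ring, sizeof(ring), 64, pos); pos++)
		;
	printf("ring holds %zu messages\n", pos);	/* prints 16 */
	return 0;
}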
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_internal.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644
index 0000000..bb88336
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -0,0 +1,117 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+	char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+	void *base_addr;
+	atomic_t *inflights;
+	spinlock_t lock;	/* protects ring data struct */
+	adf_callback_fn callback;
+	struct adf_etr_bank_data *bank;
+	dma_addr_t dma_addr;
+	uint16_t head;
+	uint16_t tail;
+	uint8_t ring_number;
+	uint8_t ring_size;
+	uint8_t msg_size;
+	uint8_t reserved;
+	struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+	struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+	struct tasklet_struct resp_handler;
+	void __iomem *csr_addr;
+	struct adf_accel_dev *accel_dev;
+	uint32_t irq_coalesc_timer;
+	uint16_t ring_mask;
+	uint16_t irq_mask;
+	spinlock_t lock;	/* protects bank data struct */
+	struct dentry *bank_debug_dir;
+	struct dentry *bank_debug_cfg;
+	uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+	struct adf_etr_bank_data *banks;
+	struct dentry *debug;
+};
+
+void adf_response_handler(uintptr_t bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+	return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+				       const char *name)
+{
+	return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
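
The #else branch above is the standard compile-out pattern: with
CONFIG_DEBUG_FS disabled, the add helpers collapse to inline stubs that
return success and the remove helpers to empty statements, so the transport
code can call them unconditionally. A toggleable sketch of the same idea,
with HAVE_DEBUGFS as a stand-in for the kernel config symbol:

#include <stdio.h>

/* #define HAVE_DEBUGFS 1 */	/* toggle to model CONFIG_DEBUG_FS */

#ifdef HAVE_DEBUGFS
static int debug_add(const char *name)
{
	printf("created debug entry %s\n", name);
	return 0;
}
#else
static inline int debug_add(const char *name)
{
	(void)name;	/* stub: succeed so callers need no #ifdef */
	return 0;
}
#endif

int main(void)
{
	return debug_add("bank_00") ? 1 : 0;
}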
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
new file mode 100644
index 0000000..cd5f37d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -0,0 +1,92 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+		if (adf_iov_putmsg(accel_dev, msg, 0))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
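
Both helpers above compose the doorbell word the same way: an origin flag
OR'd with the message type shifted into its field, then passed to
adf_iov_putmsg(). A hedged sketch of that composition — the bit positions
and type values below are illustrative assumptions, not the layout defined
in adf_pf2vf_msg.h:

#include <stdint.h>
#include <stdio.h>

#define MSGORIGIN_SYSTEM	(1u << 0)	/* assumed kernel-origin flag */
#define MSGTYPE_SHIFT		2		/* assumed type field position */
#define MSGTYPE_INIT		3u		/* assumed type codes */
#define MSGTYPE_SHUTDOWN	4u

static uint32_t make_vf2pf_msg(uint32_t type)
{
	return MSGORIGIN_SYSTEM | (type << MSGTYPE_SHIFT);
}

int main(void)
{
	printf("init=0x%x shutdown=0x%x\n",
	       make_vf2pf_msg(MSGTYPE_INIT), make_vf2pf_msg(MSGTYPE_SHUTDOWN));
	return 0;
}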
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_vf_isr.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_vf_isr.c
new file mode 100644
index 0000000..4a73fc7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -0,0 +1,335 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_VINTSOU_OFFSET	0x204
+#define ADF_VINTSOU_BUN		BIT(0)
+#define ADF_VINTSOU_PF2VF	BIT(1)
+
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+	struct adf_accel_dev *accel_dev;
+	struct work_struct work;
+};
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+	if (stat) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to enable MSI interrupts\n");
+		return stat;
+	}
+
+	accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+	if (!accel_dev->vf.irq_name)
+		return -ENOMEM;
+
+	return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	kfree(accel_dev->vf.irq_name);
+	pci_disable_msi(pdev);
+}
+
+static void adf_dev_stop_async(struct work_struct *work)
+{
+	struct adf_vf_stop_data *stop_data =
+		container_of(work, struct adf_vf_stop_data, work);
+	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	kfree(stop_data);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+	struct adf_accel_dev *accel_dev = data;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+	u32 msg;
+
+	/* Read the message from PF */
+	msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
+
+	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) PF2VF messages */
+		goto err;
+
+	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+	case ADF_PF2VF_MSGTYPE_RESTARTING: {
+		struct adf_vf_stop_data *stop_data;
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Restarting msg received from PF 0x%x\n", msg);
+
+		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+		if (!stop_data) {
+			dev_err(&GET_DEV(accel_dev),
+				"Couldn't schedule stop for vf_%d\n",
+				accel_dev->accel_id);
+			return;
+		}
+		stop_data->accel_dev = accel_dev;
+		INIT_WORK(&stop_data->work, adf_dev_stop_async);
+		queue_work(adf_vf_stop_wq, &stop_data->work);
+		/* To ack, clear the PF2VFINT bit */
+		msg &= ~ADF_PF2VF_INT;
+		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+		return;
+	}
+	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Version resp received from PF 0x%x\n", msg);
+		accel_dev->vf.pf_version =
+			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+		accel_dev->vf.compatible =
+			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		complete(&accel_dev->vf.iov_msg_completion);
+		break;
+	default:
+		goto err;
+	}
+
+	/* To ack, clear the PF2VFINT bit */
+	msg &= ~ADF_PF2VF_INT;
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return;
+err:
+	dev_err(&GET_DEV(accel_dev),
+		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+		msg);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+		     (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+	mutex_init(&accel_dev->vf.vf2pf_lock);
+	return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+	tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+	tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+	mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+	struct adf_accel_dev *accel_dev = privdata;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+	u32 v_int;
+
+	/* Read VF INT source CSR to determine the source of VF interrupt */
+	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
+
+	/* Check for PF2VF interrupt */
+	if (v_int & ADF_VINTSOU_PF2VF) {
+		/* Disable PF to VF interrupt */
+		adf_disable_pf2vf_interrupts(accel_dev);
+
+		/* Schedule tasklet to handle interrupt BH */
+		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+		return IRQ_HANDLED;
+	}
+
+	/* Check bundle interrupt */
+	if (v_int & ADF_VINTSOU_BUN) {
+		struct adf_etr_data *etr_data = accel_dev->transport;
+		struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+		/* Disable Flag and Coalesce Ring Interrupts */
+		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+					   0);
+		tasklet_hi_schedule(&bank->resp_handler);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	unsigned int cpu;
+	int ret;
+
+	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+			  (void *)accel_dev);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+			accel_dev->vf.irq_name);
+		return ret;
+	}
+	cpu = accel_dev->accel_id % num_online_cpus();
+	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+	return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+
+	tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+		     (unsigned long)priv_data->banks);
+	return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+
+	tasklet_disable(&priv_data->banks[0].resp_handler);
+	tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+/**
+ * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device virtual function.
+ */
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	irq_set_affinity_hint(pdev->irq, NULL);
+	free_irq(pdev->irq, (void *)accel_dev);
+	adf_cleanup_bh(accel_dev);
+	adf_cleanup_pf2vf_bh(accel_dev);
+	adf_disable_msi(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
+
+/**
+ * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+	if (adf_enable_msi(accel_dev))
+		goto err_out;
+
+	if (adf_setup_pf2vf_bh(accel_dev))
+		goto err_out;
+
+	if (adf_setup_bh(accel_dev))
+		goto err_out;
+
+	if (adf_request_msi_irq(accel_dev))
+		goto err_out;
+
+	return 0;
+err_out:
+	adf_vf_isr_resource_free(accel_dev);
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+int __init adf_init_vf_wq(void)
+{
+	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+
+	return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+	if (adf_vf_stop_wq)
+		destroy_workqueue(adf_vf_stop_wq);
+
+	adf_vf_stop_wq = NULL;
+}
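
adf_isr() above is a two-level handler: the top half only reads VINTSOU,
masks whichever source fired, and schedules the matching tasklet, returning
IRQ_NONE when neither known bit is set so a shared line can be handed on.
A standalone model of that dispatch (the enum mimics the kernel's
irqreturn_t values for illustration):

#include <stdint.h>
#include <stdio.h>

#define VINTSOU_BUN	(1u << 0)	/* ring-bank (bundle) interrupt */
#define VINTSOU_PF2VF	(1u << 1)	/* PF-to-VF message interrupt */

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static enum irqreturn dispatch(uint32_t v_int)
{
	if (v_int & VINTSOU_PF2VF) {
		puts("mask PF2VF source, schedule pf2vf tasklet");
		return IRQ_HANDLED;
	}
	if (v_int & VINTSOU_BUN) {
		puts("mask ring flag/coalesce ints, schedule resp tasklet");
		return IRQ_HANDLED;
	}
	return IRQ_NONE;	/* not ours */
}

int main(void)
{
	printf("%d %d %d\n", dispatch(VINTSOU_PF2VF), dispatch(VINTSOU_BUN),
	       dispatch(0));
	return 0;
}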
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644
index 0000000..46747f0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -0,0 +1,318 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+		(((val) & (mask)) << (bitpos))); }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+	(((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+	ICP_QAT_FW_COMN_RESP_SERV_NULL,
+	ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+	ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+	ICP_QAT_FW_COMN_REQ_NULL = 0,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+	ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+	ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t serv_specif_fields[4];
+		} s1;
+	} u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+	uint64_t opaque_data;
+	uint64_t src_data_addr;
+	uint64_t dest_data_addr;
+	uint32_t src_length;
+	uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+	uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+	uint8_t resrvd1;
+	uint8_t service_cmd_id;
+	uint8_t service_type;
+	uint8_t hdr_flags;
+	uint16_t serv_specif_flags;
+	uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+	uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+	struct icp_qat_fw_comn_req_hdr comn_hdr;
+	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	struct icp_qat_fw_comn_req_mid comn_mid;
+	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+	uint8_t xlat_err_code;
+	uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+	uint8_t resrvd1;
+	uint8_t service_id;
+	uint8_t response_type;
+	uint8_t hdr_flags;
+	struct icp_qat_fw_comn_error comn_error;
+	uint8_t comn_status;
+	uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+	struct icp_qat_fw_comn_resp_hdr comn_hdr;
+	uint64_t opaque_data;
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+	icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+	icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+	icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+	icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+	ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+	ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+	QAT_FIELD_GET(hdr_flags, \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+	ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+	(hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+	ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+	ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+	(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+	 ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+	((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+	 | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+			QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+			QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+			QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+	>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+	{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	 & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+	{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+	((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+	QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+	(((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+	QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+	(((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+	QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+	(((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+	QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+	QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+	QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+	QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+	QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+	QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+	ICP_QAT_FW_SLICE_NULL = 0,
+	ICP_QAT_FW_SLICE_CIPHER = 1,
+	ICP_QAT_FW_SLICE_AUTH = 2,
+	ICP_QAT_FW_SLICE_DRAM_RD = 3,
+	ICP_QAT_FW_SLICE_DRAM_WR = 4,
+	ICP_QAT_FW_SLICE_COMP = 5,
+	ICP_QAT_FW_SLICE_XLAT = 6,
+	ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
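
QAT_FIELD_SET()/QAT_FIELD_GET() above are the primitives every later flag
helper expands to: clear the field under its mask, then OR the new value in
at its bit position. A small self-contained exercise of the same two macros
against the valid-flag field (bit 7, one-bit mask):

#include <stdint.h>
#include <stdio.h>

#define QAT_FIELD_SET(flags, val, bitpos, mask) \
{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
		(((val) & (mask)) << (bitpos))); }

#define QAT_FIELD_GET(flags, bitpos, mask) \
	(((flags) >> (bitpos)) & (mask))

#define VALID_FLAG_BITPOS	7
#define VALID_FLAG_MASK		0x1

int main(void)
{
	uint8_t hdr_flags = 0;

	QAT_FIELD_SET(hdr_flags, 1, VALID_FLAG_BITPOS, VALID_FLAG_MASK);
	printf("flags=0x%02x valid=%d\n", (unsigned)hdr_flags,
	       QAT_FIELD_GET(hdr_flags, VALID_FLAG_BITPOS, VALID_FLAG_MASK));
	return 0;
}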
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644
index 0000000..72a59fa
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -0,0 +1,131 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+	ICP_QAT_FW_INIT_ME = 0,
+	ICP_QAT_FW_TRNG_ENABLE = 1,
+	ICP_QAT_FW_TRNG_DISABLE = 2,
+	ICP_QAT_FW_CONSTANTS_CFG = 3,
+	ICP_QAT_FW_STATUS_GET = 4,
+	ICP_QAT_FW_COUNTERS_GET = 5,
+	ICP_QAT_FW_LOOPBACK = 6,
+	ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+	ICP_QAT_FW_HEARTBEAT_GET = 8
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+	ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+	ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+	uint16_t init_cfg_sz;
+	uint8_t resrvd1;
+	uint8_t init_admin_cmd_id;
+	uint32_t resrvd2;
+	uint64_t opaque_data;
+	uint64_t init_cfg_ptr;
+	uint64_t resrvd3;
+};
+
+struct icp_qat_fw_init_admin_resp_hdr {
+	uint8_t flags;
+	uint8_t resrvd1;
+	uint8_t status;
+	uint8_t init_admin_cmd_id;
+};
+
+struct icp_qat_fw_init_admin_resp_pars {
+	union {
+		uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint32_t version_patch_num;
+			uint8_t context_id;
+			uint8_t ae_id;
+			uint16_t resrvd1;
+			uint64_t resrvd2;
+		} s1;
+		struct {
+			uint64_t req_rec_count;
+			uint64_t resp_sent_count;
+		} s2;
+	} u;
+};
+
+struct icp_qat_fw_init_admin_resp {
+	struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+	union {
+		uint32_t resrvd2;
+		struct {
+			uint16_t version_minor_num;
+			uint16_t version_major_num;
+		} s;
+	} u;
+	uint64_t opaque_data;
+	struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+	ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+	ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, \
+		 ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+		 ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
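
struct icp_qat_fw_init_admin_req above packs down to a fixed 32 bytes
(2+1+1+4 bytes of header fields filling one 64-bit word, then three
naturally aligned 64-bit words). A compile-time guard makes that layout
expectation explicit; treating 32 bytes as the required admin message size
is an assumption here, inferred from the field widths:

#include <stdint.h>
#include <stdio.h>

struct init_admin_req {			/* mirror of the fields above */
	uint16_t init_cfg_sz;
	uint8_t  resrvd1;
	uint8_t  init_admin_cmd_id;
	uint32_t resrvd2;
	uint64_t opaque_data;
	uint64_t init_cfg_ptr;
	uint64_t resrvd3;
};

_Static_assert(sizeof(struct init_admin_req) == 32,
	       "admin request layout drifted from 32 bytes");

int main(void)
{
	printf("sizeof(init_admin_req) = %zu\n",
	       sizeof(struct init_admin_req));
	return 0;
}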
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
new file mode 100644
index 0000000..c8d2669
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
@@ -0,0 +1,404 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+	ICP_QAT_FW_LA_CMD_CIPHER = 0,
+	ICP_QAT_FW_LA_CMD_AUTH = 1,
+	ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+	ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+	ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+	ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+	ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+	ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+	ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+	ICP_QAT_FW_LA_CMD_MGF1 = 9,
+	ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+	ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+	ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+	struct icp_qat_fw_comn_req_hdr comn_hdr;
+	struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+	struct icp_qat_fw_comn_req_mid comn_mid;
+	struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+	struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS	10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO	2
+#define ICP_QAT_FW_LA_CCM_PROTO	1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+	cmp_auth, ret_auth, update_state, \
+	ciph_iv, ciphcfg, partial) \
+	(((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+	((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+	QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+	((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+	QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+	((proto & QAT_LA_PROTO_MASK) << \
+	QAT_LA_PROTO_BITPOS)	| \
+	((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+	QAT_LA_CMP_AUTH_RES_BITPOS) | \
+	((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+	QAT_LA_RET_AUTH_RES_BITPOS) | \
+	((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+	QAT_LA_UPDATE_STATE_BITPOS) | \
+	((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+	QAT_LA_CIPH_IV_FLD_BITPOS) | \
+	((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+	((partial & QAT_LA_PARTIAL_MASK) << \
+	QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+	QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+	QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+	QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+	QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+	QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+	QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+	QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+	QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+	QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+	QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+	QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+	QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+	QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+	QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+	QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+	QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+	QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+	QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+	QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		} s1;
+	} u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+	union {
+		struct {
+			uint64_t content_desc_addr;
+			uint16_t content_desc_resrvd1;
+			uint8_t content_desc_params_sz;
+			uint8_t content_desc_hdr_resrvd2;
+			uint32_t content_desc_resrvd3;
+		} s;
+		struct {
+			uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		} sl;
+	} u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+	uint8_t cipher_state_sz;
+	uint8_t cipher_key_sz;
+	uint8_t cipher_cfg_offset;
+	uint8_t next_curr_id;
+	uint8_t cipher_padding_sz;
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+	uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+	uint32_t resrvd1;
+	uint8_t resrvd2;
+	uint8_t hash_flags;
+	uint8_t hash_cfg_offset;
+	uint8_t next_curr_id;
+	uint8_t resrvd3;
+	uint8_t outer_prefix_sz;
+	uint8_t final_sz;
+	uint8_t inner_res_sz;
+	uint8_t resrvd4;
+	uint8_t inner_state1_sz;
+	uint8_t inner_state2_offset;
+	uint8_t inner_state2_sz;
+	uint8_t outer_config_offset;
+	uint8_t outer_state1_sz;
+	uint8_t outer_res_sz;
+	uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+	uint8_t cipher_state_sz;
+	uint8_t cipher_key_sz;
+	uint8_t cipher_cfg_offset;
+	uint8_t next_curr_id_cipher;
+	uint8_t cipher_padding_sz;
+	uint8_t hash_flags;
+	uint8_t hash_cfg_offset;
+	uint8_t next_curr_id_auth;
+	uint8_t resrvd1;
+	uint8_t outer_prefix_sz;
+	uint8_t final_sz;
+	uint8_t inner_res_sz;
+	uint8_t resrvd2;
+	uint8_t inner_state1_sz;
+	uint8_t inner_state2_offset;
+	uint8_t inner_state2_sz;
+	uint8_t outer_config_offset;
+	uint8_t outer_state1_sz;
+	uint8_t outer_res_sz;
+	uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX	240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+	(sizeof(struct icp_qat_fw_la_cipher_req_params_t))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+	uint32_t cipher_offset;
+	uint32_t cipher_length;
+	union {
+		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+		struct {
+			uint64_t cipher_IV_ptr;
+			uint64_t resrvd1;
+		} s;
+	} u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+	uint32_t auth_off;
+	uint32_t auth_len;
+	union {
+		uint64_t auth_partial_st_prefix;
+		uint64_t aad_adr;
+	} u1;
+	uint64_t auth_res_addr;
+	union {
+		uint8_t inner_prefix_sz;
+		uint8_t aad_sz;
+	} u2;
+	uint8_t resrvd1;
+	uint8_t hash_state_sz;
+	uint8_t auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+	union {
+		uint8_t inner_prefix_sz;
+		uint8_t aad_sz;
+	} u2;
+	uint8_t resrvd1;
+	uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+	struct icp_qat_fw_comn_resp_hdr comn_resp;
+	uint64_t opaque_data;
+	uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+	  ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+	((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+	>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+	((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+	(((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+	((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+	& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+	((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
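
The *_NEXT_ID_*/*_CURR_ID_* macros above chain processing slices by packing
two 4-bit ids into a single byte: the next slice in bits 7:4, the current
slice in bits 3:0, with ids drawn from enum icp_qat_fw_slice in
icp_qat_fw.h. A standalone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

#define NEXT_ID_BITPOS	4
#define NEXT_ID_MASK	0xF0
#define CURR_ID_MASK	0x0F

#define SLICE_CIPHER	1	/* values from enum icp_qat_fw_slice */
#define SLICE_AUTH	2

static void set_next(uint8_t *b, uint8_t val)
{
	*b = (*b & CURR_ID_MASK) | ((val << NEXT_ID_BITPOS) & NEXT_ID_MASK);
}

static void set_curr(uint8_t *b, uint8_t val)
{
	*b = (*b & NEXT_ID_MASK) | (val & CURR_ID_MASK);
}

int main(void)
{
	uint8_t next_curr_id = 0;

	set_curr(&next_curr_id, SLICE_CIPHER);	/* cipher slice runs now */
	set_next(&next_curr_id, SLICE_AUTH);	/* then hand off to auth */
	printf("next_curr_id = 0x%02x\n", (unsigned)next_curr_id);	/* 0x21 */
	return 0;
}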
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644
index 0000000..2ffef3e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -0,0 +1,88 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+	unsigned int state;
+	unsigned int ustore_size;
+	unsigned int free_addr;
+	unsigned int free_size;
+	unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+	struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+	unsigned int ae_mask;
+	unsigned int slice_mask;
+	unsigned int revision_id;
+	unsigned int ae_max_num;
+	unsigned int upc_mask;
+	unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_handle {
+	struct icp_qat_fw_loader_hal_handle *hal_handle;
+	struct pci_dev *pci_dev;
+	void *obj_handle;
+	void *sobj_handle;
+	bool fw_auth;
+	void __iomem *hal_sram_addr_v;
+	void __iomem *hal_cap_g_ctl_csr_addr_v;
+	void __iomem *hal_cap_ae_xfer_csr_addr_v;
+	void __iomem *hal_cap_ae_local_csr_addr_v;
+	void __iomem *hal_ep_csr_addr_v;
+};
+
+struct icp_firml_dram_desc {
+	void __iomem *dram_base_addr;
+	void *dram_base_addr_v;
+	dma_addr_t dram_bus_addr;
+	u64 dram_size;
+};
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 0000000..0d7a9b5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,112 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+	u64 content_desc_addr;
+	u32 content_desc_resrvd;
+	u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+	u64 opaque;
+	u64 src_data_addr;
+	u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+	u8 resrvd1;
+	u8 resrvd2;
+	u8 service_type;
+	u8 hdr_flags;
+	u16 comn_req_flags;
+	u16 resrvd4;
+	struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+	struct icp_qat_fw_req_pke_hdr pke_hdr;
+	struct icp_qat_fw_req_pke_mid pke_mid;
+	u8 output_param_count;
+	u8 input_param_count;
+	u16 resrvd1;
+	u32 resrvd2;
+	u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+	u8 resrvd1;
+	u8 resrvd2;
+	u8 response_type;
+	u8 hdr_flags;
+	u16 comn_resp_flags;
+	u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+	struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+	u64 opaque;
+	u64 src_data_addr;
+	u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+	QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+		ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+		QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+		QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+		ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+		ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_hal.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_hal.h
new file mode 100644
index 0000000..7187917
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_hal.h
@@ -0,0 +1,156 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+	MISC_CONTROL = 0x04,
+	ICP_RESET = 0x0c,
+	ICP_GLOBAL_CLK_ENABLE = 0x50
+};
+
+enum hal_ae_csr {
+	USTORE_ADDRESS = 0x000,
+	USTORE_DATA_LOWER = 0x004,
+	USTORE_DATA_UPPER = 0x008,
+	ALU_OUT = 0x010,
+	CTX_ARB_CNTL = 0x014,
+	CTX_ENABLES = 0x018,
+	CC_ENABLE = 0x01c,
+	CSR_CTX_POINTER = 0x020,
+	CTX_STS_INDIRECT = 0x040,
+	ACTIVE_CTX_STATUS = 0x044,
+	CTX_SIG_EVENTS_INDIRECT = 0x048,
+	CTX_SIG_EVENTS_ACTIVE = 0x04c,
+	CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+	LM_ADDR_0_INDIRECT = 0x060,
+	LM_ADDR_1_INDIRECT = 0x068,
+	INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+	INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+	FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+	TIMESTAMP_LOW = 0x0c0,
+	TIMESTAMP_HIGH = 0x0c4,
+	PROFILE_COUNT = 0x144,
+	SIGNATURE_ENABLE = 0x150,
+	AE_MISC_CONTROL = 0x160,
+	LOCAL_CSR_STATUS = 0x180,
+};
+
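+/*
+ * FCU (firmware control unit) registers drive signed-image loading: a
+ * command is written to FCU_CONTROL and FCU_STATUS is polled, up to
+ * FW_AUTH_MAX_RETRY intervals of FW_AUTH_WAIT_PERIOD, for completion.
+ */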
+enum fcu_csr {
+	FCU_CONTROL           = 0x8c0,
+	FCU_STATUS            = 0x8c4,
+	FCU_STATUS1           = 0x8c8,
+	FCU_DRAM_ADDR_LO      = 0x8cc,
+	FCU_DRAM_ADDR_HI      = 0x8d0,
+	FCU_RAMBASE_ADDR_HI   = 0x8d4,
+	FCU_RAMBASE_ADDR_LO   = 0x8d8
+};
+
+enum fcu_cmd {
+	FCU_CTRL_CMD_NOOP  = 0,
+	FCU_CTRL_CMD_AUTH  = 1,
+	FCU_CTRL_CMD_LOAD  = 2,
+	FCU_CTRL_CMD_START = 3
+};
+
+enum fcu_sts {
+	FCU_STS_NO_STS    = 0,
+	FCU_STS_VERI_DONE = 1,
+	FCU_STS_LOAD_DONE = 2,
+	FCU_STS_VERI_FAIL = 3,
+	FCU_STS_LOAD_FAIL = 4,
+	FCU_STS_BUSY      = 5
+};
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define GLOBAL_CSR                0xA00
+#define FCU_CTRL_AE_POS     0x8
+#define FCU_AUTH_STS_MASK   0x7
+#define FCU_STS_DONE_POS    0x9
+#define FCU_STS_AUTHFWLD_POS 0x8
+#define FCU_LOADED_AE_POS   0x16
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY   300
+
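+/*
+ * CSR accessors: capability/global CSRs sit in a shared region, while
+ * each accelerated engine (AE) has a 4KB window (ae << 12) for its
+ * local CSRs and transfer registers; local CSR offsets are 10 bits.
+ */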
+#define SET_CAP_CSR(handle, csr, val) \
+	ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+	ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
+#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
+#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
+#define AE_CSR(handle, ae) \
+	((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \
+	((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
+#define SET_AE_CSR(handle, ae, csr, val) \
+	ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+	((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \
+	((ae & handle->hal_handle->ae_mask) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+	((reg & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+	ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+	ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_hw.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_hw.h
new file mode 100644
index 0000000..121d5e6
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+	ICP_QAT_HW_AE_0 = 0,
+	ICP_QAT_HW_AE_1 = 1,
+	ICP_QAT_HW_AE_2 = 2,
+	ICP_QAT_HW_AE_3 = 3,
+	ICP_QAT_HW_AE_4 = 4,
+	ICP_QAT_HW_AE_5 = 5,
+	ICP_QAT_HW_AE_6 = 6,
+	ICP_QAT_HW_AE_7 = 7,
+	ICP_QAT_HW_AE_8 = 8,
+	ICP_QAT_HW_AE_9 = 9,
+	ICP_QAT_HW_AE_10 = 10,
+	ICP_QAT_HW_AE_11 = 11,
+	ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+	ICP_QAT_HW_QAT_0 = 0,
+	ICP_QAT_HW_QAT_1 = 1,
+	ICP_QAT_HW_QAT_2 = 2,
+	ICP_QAT_HW_QAT_3 = 3,
+	ICP_QAT_HW_QAT_4 = 4,
+	ICP_QAT_HW_QAT_5 = 5,
+	ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+	ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+	ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+	ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+	ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+	ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+	ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+	ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+	ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+	ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+	ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+	ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+	ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+	ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+	ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+	ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+	ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+	ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+	ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+	ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+	ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+	ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+	ICP_QAT_HW_AUTH_MODE0 = 0,
+	ICP_QAT_HW_AUTH_MODE1 = 1,
+	ICP_QAT_HW_AUTH_MODE2 = 2,
+	ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+	uint32_t config;
+	uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
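+/*
+ * Pack the auth config word: algorithm in bits 0-3 (SHA3 variants spill
+ * their upper algorithm bits into 22-23 and set the SHA3 padding bit),
+ * mode in bits 4-7 and the compare/digest length in bits 8-14.
+ */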
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+	(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+	((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+	(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+	 QAT_AUTH_ALGO_SHA3_BITPOS) | \
+	 (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+	(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+	& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+	((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+	__be32 counter;
+	uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+	(((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+	struct icp_qat_hw_auth_config auth_config;
+	struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
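+/* Round val up to the next multiple of n, where n is a power of two */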
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+	ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+	struct icp_qat_hw_auth_setup inner_setup;
+	uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+	struct icp_qat_hw_auth_setup outer_setup;
+	uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+	struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+	ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+	ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+	ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+	ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+	ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+	ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+	ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+	ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+	ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+	ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+	ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+	ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+	ICP_QAT_HW_CIPHER_F8_MODE = 3,
+	ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+	ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+	uint32_t val;
+	uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+	ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+	ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+	ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+	ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
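+/*
+ * Pack the cipher config word: algorithm in bits 0-3, mode in bits 4-7,
+ * direction in bit 8 and the key-convert flag in bit 9.
+ */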
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+	(((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+	((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+	((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+	((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+	QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+	QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+	QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+	struct icp_qat_hw_cipher_config cipher_config;
+	uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+	struct icp_qat_hw_cipher_aes256_f8 aes;
+} __aligned(64);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_uclo.h
new file mode 100644
index 0000000..5d1ee7e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -0,0 +1,528 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
+#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
+#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+#define ICP_QAT_SUOF_OBJ_ID_LEN             8
+#define ICP_QAT_SUOF_FID  0x53554f46
+#define ICP_QAT_SUOF_MAJVER 0x0
+#define ICP_QAT_SUOF_MINVER 0x1
+#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
+#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
+#define ICP_QAT_CSS_FWSK_MODULUS_LEN    256
+#define ICP_QAT_CSS_FWSK_EXPONENT_LEN   4
+#define ICP_QAT_CSS_FWSK_PAD_LEN        252
+#define ICP_QAT_CSS_FWSK_PUB_LEN   (ICP_QAT_CSS_FWSK_MODULUS_LEN + \
+				    ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
+				    ICP_QAT_CSS_FWSK_PAD_LEN)
+#define ICP_QAT_CSS_SIGNATURE_LEN   256
+#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
+				    ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
+				    ICP_QAT_SIMG_AE_INSTS_LEN)
+#define ICP_QAT_CSS_AE_SIMG_LEN    (sizeof(struct icp_qat_css_hdr) + \
+				    ICP_QAT_CSS_FWSK_PUB_LEN + \
+				    ICP_QAT_CSS_SIGNATURE_LEN + \
+				    ICP_QAT_CSS_AE_IMG_LEN)
+#define ICP_QAT_AE_IMG_OFFSET	   (sizeof(struct icp_qat_css_hdr) + \
+				    ICP_QAT_CSS_FWSK_MODULUS_LEN + \
+				    ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
+				    ICP_QAT_CSS_SIGNATURE_LEN)
+#define ICP_QAT_CSS_MAX_IMAGE_LEN   0x40000
+
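+/* Decode the execution-mode fields packed into an image's ae_mode word */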
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+
+enum icp_qat_uof_mem_region {
+	ICP_QAT_UOF_SRAM_REGION = 0x0,
+	ICP_QAT_UOF_LMEM_REGION = 0x3,
+	ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+	ICP_NO_DEST	= 0,
+	ICP_GPA_REL	= 1,
+	ICP_GPA_ABS	= 2,
+	ICP_GPB_REL	= 3,
+	ICP_GPB_ABS	= 4,
+	ICP_SR_REL	= 5,
+	ICP_SR_RD_REL	= 6,
+	ICP_SR_WR_REL	= 7,
+	ICP_SR_ABS	= 8,
+	ICP_SR_RD_ABS	= 9,
+	ICP_SR_WR_ABS	= 10,
+	ICP_DR_REL	= 19,
+	ICP_DR_RD_REL	= 20,
+	ICP_DR_WR_REL	= 21,
+	ICP_DR_ABS	= 22,
+	ICP_DR_RD_ABS	= 23,
+	ICP_DR_WR_ABS	= 24,
+	ICP_LMEM	= 26,
+	ICP_LMEM0	= 27,
+	ICP_LMEM1	= 28,
+	ICP_NEIGH_REL	= 31,
+};
+
+enum icp_qat_css_fwtype {
+	CSS_AE_FIRMWARE = 0,
+	CSS_MMP_FIRMWARE = 1
+};
+
+struct icp_qat_uclo_page {
+	struct icp_qat_uclo_encap_page *encap_page;
+	struct icp_qat_uclo_region *region;
+	unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+	struct icp_qat_uclo_page *loaded;
+	struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+	struct icp_qat_uclo_region *region;
+	struct icp_qat_uclo_page *page;
+	struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+	struct icp_qat_uclo_encapme *encap_image;
+	unsigned int ctx_mask_assigned;
+	unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+	unsigned int slice_num;
+	unsigned int eff_ustore_size;
+	struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+	char *beg_uof;
+	struct icp_qat_uof_objhdr *obj_hdr;
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+	struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+	unsigned int start_addr;
+	unsigned int words_num;
+	uint64_t micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+	unsigned int def_page;
+	unsigned int page_region;
+	unsigned int beg_addr_v;
+	unsigned int beg_addr_p;
+	unsigned int micro_words_num;
+	unsigned int uwblock_num;
+	struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+	struct icp_qat_uof_image *img_ptr;
+	struct icp_qat_uclo_encap_page *page;
+	unsigned int ae_reg_num;
+	struct icp_qat_uof_ae_reg *ae_reg;
+	unsigned int init_regsym_num;
+	struct icp_qat_uof_init_regsym *init_regsym;
+	unsigned int sbreak_num;
+	struct icp_qat_uof_sbreak *sbreak;
+	unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+	unsigned int entry_num;
+	struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+	char *file_buff;
+	unsigned int checksum;
+	unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+	unsigned int table_len;
+	unsigned int reserved;
+	uint64_t strings;
+};
+
+struct icp_qat_uclo_objhandle {
+	unsigned int prod_type;
+	unsigned int prod_rev;
+	struct icp_qat_uclo_objhdr *obj_hdr;
+	struct icp_qat_uof_encap_obj encap_uof_obj;
+	struct icp_qat_uof_strtable str_table;
+	struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+	struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+	struct icp_qat_uclo_init_mem_table init_mem_tab;
+	struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+	struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+	int uimage_num;
+	int uword_in_bytes;
+	int global_inited;
+	unsigned int ae_num;
+	unsigned int ustore_phy_size;
+	void *obj_buf;
+	uint64_t *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+	unsigned int start_addr;
+	unsigned int words_num;
+	unsigned int uword_offset;
+	unsigned int reserved;
+};
+
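+/*
+ * On-disk UOF layout: a file header followed by chunk headers that
+ * locate the named chunks (UOF_OBJS, UOF_STRT, UOF_IMAG, UOF_IMEM)
+ * within the file.
+ */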
+struct icp_qat_uof_filehdr {
+	unsigned short file_id;
+	unsigned short reserved1;
+	char min_ver;
+	char maj_ver;
+	unsigned short reserved2;
+	unsigned short max_chunks;
+	unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+	char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+	unsigned int checksum;
+	unsigned int offset;
+	unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+	unsigned int ac_dev_type;
+	unsigned short min_cpu_ver;
+	unsigned short max_cpu_ver;
+	short max_chunks;
+	short num_chunks;
+	unsigned int reserved1;
+	unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+	char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+	unsigned int offset;
+	unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+	unsigned int offset_in_byte;
+	unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+	unsigned int sym_name;
+	char region;
+	char scope;
+	unsigned short reserved1;
+	unsigned int addr;
+	unsigned int num_in_bytes;
+	unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+	unsigned int sym_name;
+	char init_type;
+	char value_type;
+	char reg_type;
+	unsigned char ctx;
+	unsigned int reg_addr;
+	unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+	unsigned int sram_base;
+	unsigned int sram_size;
+	unsigned int sram_alignment;
+	unsigned int sdram_base;
+	unsigned int sdram_size;
+	unsigned int sdram_alignment;
+	unsigned int sdram1_base;
+	unsigned int sdram1_size;
+	unsigned int sdram1_alignment;
+	unsigned int scratch_base;
+	unsigned int scratch_size;
+	unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+	char tool_id[ICP_QAT_UOF_OBJID_LEN];
+	int tool_ver;
+	unsigned int reserved1;
+	unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+	unsigned int page_num;
+	unsigned int virt_uaddr;
+	unsigned char sbreak_type;
+	unsigned char reg_type;
+	unsigned short reserved1;
+	unsigned int addr_offset;
+	unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+	unsigned int page_region;
+	unsigned int page_num;
+	unsigned char def_page;
+	unsigned char reserved2;
+	unsigned short reserved1;
+	unsigned int beg_addr_v;
+	unsigned int beg_addr_p;
+	unsigned int neigh_reg_tab_offset;
+	unsigned int uc_var_tab_offset;
+	unsigned int imp_var_tab_offset;
+	unsigned int imp_expr_tab_offset;
+	unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+	unsigned int img_name;
+	unsigned int ae_assigned;
+	unsigned int ctx_assigned;
+	unsigned int ac_dev_type;
+	unsigned int entry_address;
+	unsigned int fill_pattern[2];
+	unsigned int reloadable_size;
+	unsigned char sensitivity;
+	unsigned char reserved;
+	unsigned short ae_mode;
+	unsigned short max_ver;
+	unsigned short min_ver;
+	unsigned short image_attrib;
+	unsigned short reserved2;
+	unsigned short page_region_num;
+	unsigned short numpages;
+	unsigned int reg_tab_offset;
+	unsigned int init_reg_sym_tab;
+	unsigned int sbreak_tab;
+	unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+	unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+	unsigned int name;
+	unsigned int vis_name;
+	unsigned short type;
+	unsigned short addr;
+	unsigned short access_mode;
+	unsigned char visible;
+	unsigned char reserved1;
+	unsigned short ref_count;
+	unsigned short reserved2;
+	unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+	unsigned int micro_words_num;
+	unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+	unsigned int ae;
+	unsigned int addr;
+	unsigned int *value;
+	unsigned int size;
+	struct icp_qat_uof_batch_init *next;
+};
+
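+/*
+ * A parsed signed image (SUOF): pointers to its CSS header, public key,
+ * signature and image body, plus the AEs the image is assigned to.
+ */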
+struct icp_qat_suof_img_hdr {
+	char          *simg_buf;
+	unsigned long simg_len;
+	char          *css_header;
+	char          *css_key;
+	char          *css_signature;
+	char          *css_simg;
+	unsigned long simg_size;
+	unsigned int  ae_num;
+	unsigned int  ae_mask;
+	unsigned int  fw_type;
+	unsigned long simg_name;
+	unsigned long appmeta_data;
+};
+
+struct icp_qat_suof_img_tbl {
+	unsigned int num_simgs;
+	struct icp_qat_suof_img_hdr *simg_hdr;
+};
+
+struct icp_qat_suof_handle {
+	unsigned int  file_id;
+	unsigned int  check_sum;
+	char          min_ver;
+	char          maj_ver;
+	char          fw_type;
+	char          *suof_buf;
+	unsigned int  suof_size;
+	char          *sym_str;
+	unsigned int  sym_size;
+	struct icp_qat_suof_img_tbl img_table;
+};
+
+struct icp_qat_fw_auth_desc {
+	unsigned int   img_len;
+	unsigned int   reserved;
+	unsigned int   css_hdr_high;
+	unsigned int   css_hdr_low;
+	unsigned int   img_high;
+	unsigned int   img_low;
+	unsigned int   signature_high;
+	unsigned int   signature_low;
+	unsigned int   fwsk_pub_high;
+	unsigned int   fwsk_pub_low;
+	unsigned int   img_ae_mode_data_high;
+	unsigned int   img_ae_mode_data_low;
+	unsigned int   img_ae_init_data_high;
+	unsigned int   img_ae_init_data_low;
+	unsigned int   img_ae_insts_high;
+	unsigned int   img_ae_insts_low;
+};
+
+struct icp_qat_auth_chunk {
+	struct icp_qat_fw_auth_desc fw_auth_desc;
+	u64 chunk_size;
+	u64 chunk_bus_addr;
+};
+
+struct icp_qat_css_hdr {
+	unsigned int module_type;
+	unsigned int header_len;
+	unsigned int header_ver;
+	unsigned int module_id;
+	unsigned int module_vendor;
+	unsigned int date;
+	unsigned int size;
+	unsigned int key_size;
+	unsigned int module_size;
+	unsigned int exponent_size;
+	unsigned int fw_type;
+	unsigned int reserved[21];
+};
+
+struct icp_qat_simg_ae_mode {
+	unsigned int     file_id;
+	unsigned short   maj_ver;
+	unsigned short   min_ver;
+	unsigned int     dev_type;
+	unsigned short   devmax_ver;
+	unsigned short   devmin_ver;
+	unsigned int     ae_mask;
+	unsigned int     ctx_enables;
+	char             fw_type;
+	char             ctx_mode;
+	char             nn_mode;
+	char             lm0_mode;
+	char             lm1_mode;
+	char             scs_mode;
+	char             lm2_mode;
+	char             lm3_mode;
+	char             tindex_mode;
+	unsigned char    reserved[7];
+	char             simg_name[256];
+	char             appmeta_data[256];
+};
+
+struct icp_qat_suof_filehdr {
+	unsigned int     file_id;
+	unsigned int     check_sum;
+	char             min_ver;
+	char             maj_ver;
+	char             fw_type;
+	char             reserved;
+	unsigned short   max_chunks;
+	unsigned short   num_chunks;
+};
+
+struct icp_qat_suof_chunk_hdr {
+	char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
+	u64 offset;
+	u64 size;
+};
+
+struct icp_qat_suof_strtable {
+	unsigned int tab_length;
+	unsigned int strings;
+};
+
+struct icp_qat_suof_objhdr {
+	unsigned int img_length;
+	unsigned int reserved;
+};
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_algs.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_algs.c
new file mode 100644
index 0000000..baffae8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_algs.c
@@ -0,0 +1,1315 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
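+/*
+ * Encrypt sessions use the AES key as supplied; decrypt sessions ask
+ * the hardware to derive the decryption key schedule (key convert).
+ */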
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+				       ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+				       ICP_QAT_HW_CIPHER_DECRYPT)
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_alg_buf {
+	uint32_t len;
+	uint32_t resrvd;
+	uint64_t addr;
+} __packed;
+
+struct qat_alg_buf_list {
+	uint64_t resrvd;
+	uint32_t num_bufs;
+	uint32_t num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+/* Common content descriptor */
+struct qat_alg_cd {
+	union {
+		struct qat_enc { /* Encrypt content desc */
+			struct icp_qat_hw_cipher_algo_blk cipher;
+			struct icp_qat_hw_auth_algo_blk hash;
+		} qat_enc_cd;
+		struct qat_dec { /* Decrypt content desc */
+			struct icp_qat_hw_auth_algo_blk hash;
+			struct icp_qat_hw_cipher_algo_blk cipher;
+		} qat_dec_cd;
+	};
+} __aligned(64);
+
+struct qat_alg_aead_ctx {
+	struct qat_alg_cd *enc_cd;
+	struct qat_alg_cd *dec_cd;
+	dma_addr_t enc_cd_paddr;
+	dma_addr_t dec_cd_paddr;
+	struct icp_qat_fw_la_bulk_req enc_fw_req;
+	struct icp_qat_fw_la_bulk_req dec_fw_req;
+	struct crypto_shash *hash_tfm;
+	enum icp_qat_hw_auth_algo qat_hash_alg;
+	struct qat_crypto_instance *inst;
+};
+
+struct qat_alg_ablkcipher_ctx {
+	struct icp_qat_hw_cipher_algo_blk *enc_cd;
+	struct icp_qat_hw_cipher_algo_blk *dec_cd;
+	dma_addr_t enc_cd_paddr;
+	dma_addr_t dec_cd_paddr;
+	struct icp_qat_fw_la_bulk_req enc_fw_req;
+	struct icp_qat_fw_la_bulk_req dec_fw_req;
+	struct qat_crypto_instance *inst;
+	struct crypto_tfm *tfm;
+	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
+};
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+	switch (qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		return ICP_QAT_HW_SHA1_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		return ICP_QAT_HW_SHA256_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		return ICP_QAT_HW_SHA512_STATE1_SZ;
+	default:
+		return -EFAULT;
+	}
+	return -EFAULT;
+}
+
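+/*
+ * Precompute the HMAC inner and outer partial digests: hash one block
+ * of key XOR ipad and of key XOR opad, exporting each intermediate
+ * state big-endian into the content descriptor so the firmware can
+ * resume the hash from there.
+ */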
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+				  struct qat_alg_aead_ctx *ctx,
+				  const uint8_t *auth_key,
+				  unsigned int auth_keylen)
+{
+	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+	struct sha1_state sha1;
+	struct sha256_state sha256;
+	struct sha512_state sha512;
+	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+	char ipad[block_size];
+	char opad[block_size];
+	__be32 *hash_state_out;
+	__be64 *hash512_state_out;
+	int i, offset;
+
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
+	shash->tfm = ctx->hash_tfm;
+	shash->flags = 0x0;
+
+	if (auth_keylen > block_size) {
+		int ret = crypto_shash_digest(shash, auth_key,
+					      auth_keylen, ipad);
+		if (ret)
+			return ret;
+
+		memcpy(opad, ipad, digest_size);
+	} else {
+		memcpy(ipad, auth_key, auth_keylen);
+		memcpy(opad, auth_key, auth_keylen);
+	}
+
+	for (i = 0; i < block_size; i++) {
+		char *ipad_ptr = ipad + i;
+		char *opad_ptr = opad + i;
+		*ipad_ptr ^= HMAC_IPAD_VALUE;
+		*opad_ptr ^= HMAC_OPAD_VALUE;
+	}
+
+	if (crypto_shash_init(shash))
+		return -EFAULT;
+
+	if (crypto_shash_update(shash, ipad, block_size))
+		return -EFAULT;
+
+	hash_state_out = (__be32 *)hash->sha.state1;
+	hash512_state_out = (__be64 *)hash_state_out;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (crypto_shash_export(shash, &sha1))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (crypto_shash_export(shash, &sha256))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (crypto_shash_export(shash, &sha512))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+		break;
+	default:
+		return -EFAULT;
+	}
+
+	if (crypto_shash_init(shash))
+		return -EFAULT;
+
+	if (crypto_shash_update(shash, opad, block_size))
+		return -EFAULT;
+
+	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+	hash512_state_out = (__be64 *)hash_state_out;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (crypto_shash_export(shash, &sha1))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha1.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		if (crypto_shash_export(shash, &sha256))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+			*hash_state_out = cpu_to_be32(*(sha256.state + i));
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		if (crypto_shash_export(shash, &sha512))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
+		break;
+	default:
+		return -EFAULT;
+	}
+	memzero_explicit(ipad, block_size);
+	memzero_explicit(opad, block_size);
+	return 0;
+}
+
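+/* Fill the header fields common to all lookaside (LA) requests */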
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+	header->hdr_flags =
+		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+					    QAT_COMN_PTR_TYPE_SGL);
+	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+				  ICP_QAT_FW_LA_PARTIAL_NONE);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
+					 int alg,
+					 struct crypto_authenc_keys *keys,
+					 int mode)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+	struct icp_qat_hw_auth_algo_blk *hash =
+		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+
+	/* CD setup */
+	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+	hash->sha.inner_setup.auth_config.config =
+		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+					     ctx->qat_hash_alg, digestsize);
+	hash->sha.inner_setup.auth_counter.counter =
+		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+		return -EFAULT;
+
+	/* Request setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_RET_AUTH_RES);
+	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+	/* Cipher CD config setup */
+	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	/* Auth CD config setup */
+	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+	hash_cd_ctrl->inner_res_sz = digestsize;
+	hash_cd_ctrl->final_sz = digestsize;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		hash_cd_ctrl->inner_state1_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+		hash_cd_ctrl->inner_state2_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+		break;
+	default:
+		break;
+	}
+	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+			((sizeof(struct icp_qat_hw_auth_setup) +
+			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	return 0;
+}
+
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
+					 int alg,
+					 struct crypto_authenc_keys *keys,
+					 int mode)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+	struct icp_qat_hw_cipher_algo_blk *cipher =
+		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+		sizeof(struct icp_qat_hw_auth_setup) +
+		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(struct icp_qat_fw_la_auth_req_params *)
+		((char *)&req_tmpl->serv_specif_rqpars +
+		sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+	/* CD setup */
+	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
+	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+	hash->sha.inner_setup.auth_config.config =
+		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+					     ctx->qat_hash_alg,
+					     digestsize);
+	hash->sha.inner_setup.auth_counter.counter =
+		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+		return -EFAULT;
+
+	/* Request setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				   ICP_QAT_FW_LA_CMP_AUTH_RES);
+	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+	/* Cipher CD config setup */
+	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cipher_cd_ctrl->cipher_cfg_offset =
+		(sizeof(struct icp_qat_hw_auth_setup) +
+		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+	/* Auth CD config setup */
+	hash_cd_ctrl->hash_cfg_offset = 0;
+	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+	hash_cd_ctrl->inner_res_sz = digestsize;
+	hash_cd_ctrl->final_sz = digestsize;
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		hash_cd_ctrl->inner_state1_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+		hash_cd_ctrl->inner_state2_sz =
+			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+		break;
+	default:
+		break;
+	}
+
+	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+			((sizeof(struct icp_qat_hw_auth_setup) +
+			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+	auth_param->auth_res_sz = digestsize;
+	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	return 0;
+}
+
+static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
+					struct icp_qat_fw_la_bulk_req *req,
+					struct icp_qat_hw_cipher_algo_blk *cd,
+					const uint8_t *key, unsigned int keylen)
+{
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+
+	memcpy(cd->aes.key, key, keylen);
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cd_pars->u.s.content_desc_params_sz =
+				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+	/* Cipher CD config setup */
+	cd_ctrl->cipher_key_sz = keylen >> 3;
+	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+	cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
+
+static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
+					int alg, const uint8_t *key,
+					unsigned int keylen, int mode)
+{
+	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
+static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
+					int alg, const uint8_t *key,
+					unsigned int keylen, int mode)
+{
+	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+
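+	/*
+	 * CTR mode only ever runs the cipher in the forward direction, so
+	 * the "decrypt" descriptor is configured for encryption here.
+	 */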
+	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+		dec_cd->aes.cipher_config.val =
+					QAT_AES_HW_CONFIG_DEC(alg, mode);
+	else
+		dec_cd->aes.cipher_config.val =
+					QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
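+/*
+ * Map an AES key length onto the hardware algorithm ID. XTS keys carry
+ * two AES keys back to back, hence the doubled lengths in that branch.
+ */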
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
+{
+	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+		switch (key_len) {
+		case AES_KEYSIZE_128:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+			break;
+		case AES_KEYSIZE_192:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+			break;
+		case AES_KEYSIZE_256:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (key_len) {
+		case AES_KEYSIZE_128 << 1:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+			break;
+		case AES_KEYSIZE_256 << 1:
+			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+				      unsigned int keylen,  int mode)
+{
+	struct crypto_authenc_keys keys;
+	int alg;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen))
+		goto bad_key;
+
+	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
+		goto bad_key;
+
+	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
+		goto error;
+
+	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
+		goto error;
+
+	return 0;
+bad_key:
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+error:
+	return -EFAULT;
+}
+
+static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
+					    const uint8_t *key,
+					    unsigned int keylen,
+					    int mode)
+{
+	int alg;
+
+	if (qat_alg_validate_key(keylen, &alg, mode))
+		goto bad_key;
+
+	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
+	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
+	return 0;
+bad_key:
+	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+			       unsigned int keylen)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct device *dev;
+
+	if (ctx->enc_cd) {
+		/* rekeying */
+		dev = &GET_DEV(ctx->inst->accel_dev);
+		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+	} else {
+		/* new key */
+		int node = get_current_node();
+		struct qat_crypto_instance *inst =
+				qat_crypto_get_instance_node(node);
+		if (!inst)
+			return -EINVAL;
+
+		dev = &GET_DEV(inst->accel_dev);
+		ctx->inst = inst;
+		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+						  &ctx->enc_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->enc_cd)
+			return -ENOMEM;
+		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+						  &ctx->dec_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->dec_cd)
+			goto out_free_enc;
+	}
+	if (qat_alg_aead_init_sessions(tfm, key, keylen,
+				       ICP_QAT_HW_CIPHER_CBC_MODE))
+		goto out_free_all;
+
+	return 0;
+
+out_free_all:
+	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+			  ctx->dec_cd, ctx->dec_cd_paddr);
+	ctx->dec_cd = NULL;
+out_free_enc:
+	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+			  ctx->enc_cd, ctx->enc_cd_paddr);
+	ctx->enc_cd = NULL;
+	return -ENOMEM;
+}
+
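+/*
+ * Undo qat_alg_sgl_to_bufl(): unmap every data buffer and the flat
+ * buffer lists themselves, then free the list allocations. For
+ * out-of-place requests the destination list is torn down as well.
+ */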
+static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+			      struct qat_crypto_request *qat_req)
+{
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_alg_buf_list *bl = qat_req->buf.bl;
+	struct qat_alg_buf_list *blout = qat_req->buf.blout;
+	dma_addr_t blp = qat_req->buf.blp;
+	dma_addr_t blpout = qat_req->buf.bloutp;
+	size_t sz = qat_req->buf.sz;
+	size_t sz_out = qat_req->buf.sz_out;
+	int i;
+
+	for (i = 0; i < bl->num_bufs; i++)
+		dma_unmap_single(dev, bl->bufers[i].addr,
+				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+	kfree(bl);
+	if (blp != blpout) {
+		/* If out of place, unmap only the buffers that were mapped */
+		int bufless = blout->num_bufs - blout->num_mapped_bufs;
+
+		for (i = bufless; i < blout->num_bufs; i++) {
+			dma_unmap_single(dev, blout->bufers[i].addr,
+					 blout->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+		}
+		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+		kfree(blout);
+	}
+}
+
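+/*
+ * Flatten the source (and, for out-of-place requests, destination)
+ * scatterlists into DMA-mapped buffer lists that the firmware can walk.
+ * The lists are mapped to-device; the data buffers bidirectionally.
+ */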
+static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+			       struct scatterlist *sgl,
+			       struct scatterlist *sglout,
+			       struct qat_crypto_request *qat_req)
+{
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	int i, sg_nctr = 0;
+	int n = sg_nents(sgl);
+	struct qat_alg_buf_list *bufl;
+	struct qat_alg_buf_list *buflout = NULL;
+	dma_addr_t blp;
+	dma_addr_t bloutp = 0;
+	struct scatterlist *sg;
+	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
+			((1 + n) * sizeof(struct qat_alg_buf));
+
+	if (unlikely(!n))
+		return -EINVAL;
+
+	bufl = kzalloc_node(sz, GFP_ATOMIC,
+			    dev_to_node(&GET_DEV(inst->accel_dev)));
+	if (unlikely(!bufl))
+		return -ENOMEM;
+
+	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, blp)))
+		goto err_in;
+
+	for_each_sg(sgl, sg, n, i) {
+		int y = sg_nctr;
+
+		if (!sg->length)
+			continue;
+
+		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+						      sg->length,
+						      DMA_BIDIRECTIONAL);
+		bufl->bufers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+			goto err_in;
+		sg_nctr++;
+	}
+	bufl->num_bufs = sg_nctr;
+	qat_req->buf.bl = bufl;
+	qat_req->buf.blp = blp;
+	qat_req->buf.sz = sz;
+	/* Handle out of place operation */
+	if (sgl != sglout) {
+		struct qat_alg_buf *bufers;
+
+		n = sg_nents(sglout);
+		sz_out = sizeof(struct qat_alg_buf_list) +
+			((1 + n) * sizeof(struct qat_alg_buf));
+		sg_nctr = 0;
+		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
+				       dev_to_node(&GET_DEV(inst->accel_dev)));
+		if (unlikely(!buflout))
+			goto err_in;
+		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, bloutp)))
+			goto err_out;
+		bufers = buflout->bufers;
+		for_each_sg(sglout, sg, n, i) {
+			int y = sg_nctr;
+
+			if (!sg->length)
+				continue;
+
+			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+							sg->length,
+							DMA_BIDIRECTIONAL);
+			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+				goto err_out;
+			bufers[y].len = sg->length;
+			sg_nctr++;
+		}
+		buflout->num_bufs = sg_nctr;
+		buflout->num_mapped_bufs = sg_nctr;
+		qat_req->buf.blout = buflout;
+		qat_req->buf.bloutp = bloutp;
+		qat_req->buf.sz_out = sz_out;
+	} else {
+		/* Otherwise set the src and dst to the same address */
+		qat_req->buf.bloutp = qat_req->buf.blp;
+		qat_req->buf.sz_out = 0;
+	}
+	return 0;
+
+err_out:
+	n = sg_nents(sglout);
+	for (i = 0; i < n; i++)
+		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+			dma_unmap_single(dev, buflout->bufers[i].addr,
+					 buflout->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+	if (!dma_mapping_error(dev, bloutp))
+		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+	kfree(buflout);
+
+err_in:
+	n = sg_nents(sgl);
+	for (i = 0; i < n; i++)
+		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
+			dma_unmap_single(dev, bufl->bufers[i].addr,
+					 bufl->bufers[i].len,
+					 DMA_BIDIRECTIONAL);
+
+	if (!dma_mapping_error(dev, blp))
+		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+	kfree(bufl);
+
+	dev_err(dev, "Failed to map buffer for DMA\n");
+	return -ENOMEM;
+}
+
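+/*
+ * Completion handlers for symmetric requests: free the DMA buffer lists,
+ * translate the firmware status into an errno and complete the original
+ * crypto request.
+ */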
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+				  struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct aead_request *areq = qat_req->aead_req;
+	uint8_t stat_field = qat_resp->comn_resp.comn_status;
+	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+	qat_alg_free_bufl(inst, qat_req);
+	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+		res = -EBADMSG;
+	areq->base.complete(&areq->base, res);
+}
+
+static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+					struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+	uint8_t stat_field = qat_resp->comn_resp.comn_status;
+	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+	qat_alg_free_bufl(inst, qat_req);
+	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+		res = -EINVAL;
+	areq->base.complete(&areq->base, res);
+}
+
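+/*
+ * Ring callback: the request pointer was stashed in the message's opaque
+ * data when it was sent, so it can be recovered from the response here
+ * and dispatched to the per-request completion handler.
+ */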
+void qat_alg_callback(void *resp)
+{
+	struct icp_qat_fw_la_resp *qat_resp = resp;
+	struct qat_crypto_request *qat_req =
+				(void *)(__force long)qat_resp->opaque_data;
+
+	qat_req->cb(qat_resp, qat_req);
+}
+
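+/*
+ * Build a request from the pre-computed session template, attach the
+ * mapped buffers and IV, and submit it to the symmetric TX ring,
+ * retrying briefly if the ring is full. Returns -EINPROGRESS on success;
+ * completion is reported asynchronously through qat_aead_alg_callback().
+ */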
+static int qat_alg_aead_dec(struct aead_request *areq)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int digst_size = crypto_aead_authsize(aead_tfm);
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->dec_fw_req;
+	qat_req->aead_ctx = ctx;
+	qat_req->aead_req = areq;
+	qat_req->cb = qat_aead_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = areq->cryptlen - digst_size;
+	cipher_param->cipher_offset = areq->assoclen;
+	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+	auth_param->auth_off = 0;
+	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_aead_enc(struct aead_request *areq)
+{
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	uint8_t *iv = areq->iv;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->enc_fw_req;
+	qat_req->aead_ctx = ctx;
+	qat_req->aead_req = areq;
+	qat_req->cb = qat_aead_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+	cipher_param->cipher_length = areq->cryptlen;
+	cipher_param->cipher_offset = areq->assoclen;
+
+	auth_param->auth_off = 0;
+	auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+				     const u8 *key, unsigned int keylen,
+				     int mode)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct device *dev;
+
+	spin_lock(&ctx->lock);
+	if (ctx->enc_cd) {
+		/* rekeying */
+		dev = &GET_DEV(ctx->inst->accel_dev);
+		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+	} else {
+		/* new key */
+		int node = get_current_node();
+		struct qat_crypto_instance *inst =
+				qat_crypto_get_instance_node(node);
+		if (!inst) {
+			spin_unlock(&ctx->lock);
+			return -EINVAL;
+		}
+
+		dev = &GET_DEV(inst->accel_dev);
+		ctx->inst = inst;
+		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+						  &ctx->enc_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->enc_cd) {
+			spin_unlock(&ctx->lock);
+			return -ENOMEM;
+		}
+		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+						  &ctx->dec_cd_paddr,
+						  GFP_ATOMIC);
+		if (!ctx->dec_cd) {
+			spin_unlock(&ctx->lock);
+			goto out_free_enc;
+		}
+	}
+	spin_unlock(&ctx->lock);
+	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
+		goto out_free_all;
+
+	return 0;
+
+out_free_all:
+	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+			  ctx->dec_cd, ctx->dec_cd_paddr);
+	ctx->dec_cd = NULL;
+out_free_enc:
+	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+			  ctx->enc_cd, ctx->enc_cd_paddr);
+	ctx->enc_cd = NULL;
+	return -ENOMEM;
+}
+
+static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+					 ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+					 ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
+					 ICP_QAT_HW_CIPHER_XTS_MODE);
+}
+
+static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->enc_fw_req;
+	qat_req->ablkcipher_ctx = ctx;
+	qat_req->ablkcipher_req = req;
+	qat_req->cb = qat_ablkcipher_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = req->nbytes;
+	cipher_param->cipher_offset = 0;
+	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_bulk_req *msg;
+	int ret, ctr = 0;
+
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	if (unlikely(ret))
+		return ret;
+
+	msg = &qat_req->req;
+	*msg = ctx->dec_fw_req;
+	qat_req->ablkcipher_ctx = ctx;
+	qat_req->ablkcipher_req = req;
+	qat_req->cb = qat_ablkcipher_alg_callback;
+	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+	cipher_param->cipher_length = req->nbytes;
+	cipher_param->cipher_offset = 0;
+	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+	do {
+		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+	} while (ret == -EAGAIN && ctr++ < 10);
+
+	if (ret == -EAGAIN) {
+		qat_alg_free_bufl(ctx->inst, qat_req);
+		return -EBUSY;
+	}
+	return -EINPROGRESS;
+}
+
+static int qat_alg_aead_init(struct crypto_aead *tfm,
+			     enum icp_qat_hw_auth_algo hash,
+			     const char *hash_name)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+	if (IS_ERR(ctx->hash_tfm))
+		return PTR_ERR(ctx->hash_tfm);
+	ctx->qat_hash_alg = hash;
+	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+	return 0;
+}
+
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
+{
+	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
+{
+	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
+{
+	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
+{
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev;
+
+	crypto_free_shash(ctx->hash_tfm);
+
+	if (!inst)
+		return;
+
+	dev = &GET_DEV(inst->accel_dev);
+	if (ctx->enc_cd) {
+		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+				  ctx->enc_cd, ctx->enc_cd_paddr);
+	}
+	if (ctx->dec_cd) {
+		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+				  ctx->dec_cd, ctx->dec_cd_paddr);
+	}
+	qat_crypto_put_instance(inst);
+}
+
+static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	spin_lock_init(&ctx->lock);
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
+	ctx->tfm = tfm;
+	return 0;
+}
+
+static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev;
+
+	if (!inst)
+		return;
+
+	dev = &GET_DEV(inst->accel_dev);
+	if (ctx->enc_cd) {
+		memset(ctx->enc_cd, 0,
+		       sizeof(struct icp_qat_hw_cipher_algo_blk));
+		dma_free_coherent(dev,
+				  sizeof(struct icp_qat_hw_cipher_algo_blk),
+				  ctx->enc_cd, ctx->enc_cd_paddr);
+	}
+	if (ctx->dec_cd) {
+		memset(ctx->dec_cd, 0,
+		       sizeof(struct icp_qat_hw_cipher_algo_blk));
+		dma_free_coherent(dev,
+				  sizeof(struct icp_qat_hw_cipher_algo_blk),
+				  ctx->dec_cd, ctx->dec_cd_paddr);
+	}
+	qat_crypto_put_instance(inst);
+}
+
+static struct aead_alg qat_aeads[] = { {
+	.base = {
+		.cra_name = "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha1_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha256),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha256_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha512),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha512_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct crypto_alg qat_algs[] = { {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "qat_aes_cbc",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_ablkcipher_init,
+	.cra_exit = qat_alg_ablkcipher_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.setkey = qat_alg_ablkcipher_cbc_setkey,
+			.decrypt = qat_alg_ablkcipher_decrypt,
+			.encrypt = qat_alg_ablkcipher_encrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+}, {
+	.cra_name = "ctr(aes)",
+	.cra_driver_name = "qat_aes_ctr",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_ablkcipher_init,
+	.cra_exit = qat_alg_ablkcipher_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.setkey = qat_alg_ablkcipher_ctr_setkey,
+			.decrypt = qat_alg_ablkcipher_decrypt,
+			.encrypt = qat_alg_ablkcipher_encrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+}, {
+	.cra_name = "xts(aes)",
+	.cra_driver_name = "qat_aes_xts",
+	.cra_priority = 4001,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_module = THIS_MODULE,
+	.cra_init = qat_alg_ablkcipher_init,
+	.cra_exit = qat_alg_ablkcipher_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.setkey = qat_alg_ablkcipher_xts_setkey,
+			.decrypt = qat_alg_ablkcipher_decrypt,
+			.encrypt = qat_alg_ablkcipher_encrypt,
+			.min_keysize = 2 * AES_MIN_KEY_SIZE,
+			.max_keysize = 2 * AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+} };
+
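+/*
+ * The algorithms are registered with the crypto API when the first
+ * accelerator comes up and unregistered when the last one goes away;
+ * active_devs keeps the device count under algs_lock.
+ */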
+int qat_algs_register(void)
+{
+	int ret = 0, i;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs != 1)
+		goto unlock;
+
+	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC;
+
+	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	if (ret)
+		goto unlock;
+
+	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+
+	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	if (ret)
+		goto unreg_algs;
+
+unlock:
+	mutex_unlock(&algs_lock);
+	return ret;
+
+unreg_algs:
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	goto unlock;
+}
+
+void qat_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--active_devs != 0)
+		goto unlock;
+
+	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
+	mutex_unlock(&algs_lock);
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_asym_algs.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 0000000..6f5dd68
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,1361 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <crypto/scatterwalk.h>
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
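+/*
+ * Firmware PKE parameter tables. Each member is the DMA address of one
+ * big-endian operand; the slot after the last operand in in_tab/out_tab
+ * is zeroed to terminate the table before it is handed to the device.
+ */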
+struct qat_rsa_input_params {
+	union {
+		struct {
+			dma_addr_t m;
+			dma_addr_t e;
+			dma_addr_t n;
+		} enc;
+		struct {
+			dma_addr_t c;
+			dma_addr_t d;
+			dma_addr_t n;
+		} dec;
+		struct {
+			dma_addr_t c;
+			dma_addr_t p;
+			dma_addr_t q;
+			dma_addr_t dp;
+			dma_addr_t dq;
+			dma_addr_t qinv;
+		} dec_crt;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+	union {
+		struct {
+			dma_addr_t c;
+		} enc;
+		struct {
+			dma_addr_t m;
+		} dec;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+	char *n;
+	char *e;
+	char *d;
+	char *p;
+	char *q;
+	char *dp;
+	char *dq;
+	char *qinv;
+	dma_addr_t dma_n;
+	dma_addr_t dma_e;
+	dma_addr_t dma_d;
+	dma_addr_t dma_p;
+	dma_addr_t dma_q;
+	dma_addr_t dma_dp;
+	dma_addr_t dma_dq;
+	dma_addr_t dma_qinv;
+	unsigned int key_sz;
+	bool crt_mode;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+	union {
+		struct {
+			dma_addr_t b;
+			dma_addr_t xa;
+			dma_addr_t p;
+		} in;
+		struct {
+			dma_addr_t xa;
+			dma_addr_t p;
+		} in_g2;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+	union {
+		dma_addr_t r;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+	char *g;
+	char *xa;
+	char *p;
+	dma_addr_t dma_g;
+	dma_addr_t dma_xa;
+	dma_addr_t dma_p;
+	unsigned int p_size;
+	bool g2;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_asym_request {
+	union {
+		struct qat_rsa_input_params rsa;
+		struct qat_dh_input_params dh;
+	} in;
+	union {
+		struct qat_rsa_output_params rsa;
+		struct qat_dh_output_params dh;
+	} out;
+	dma_addr_t phy_in;
+	dma_addr_t phy_out;
+	char *src_align;
+	char *dst_align;
+	struct icp_qat_fw_pke_request req;
+	union {
+		struct qat_rsa_ctx *rsa;
+		struct qat_dh_ctx *dh;
+	} ctx;
+	union {
+		struct akcipher_request *rsa;
+		struct kpp_request *dh;
+	} areq;
+	int err;
+	void (*cb)(struct icp_qat_fw_pke_resp *resp);
+} __aligned(64);
+
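+/*
+ * DH completion handler: release the operand mappings, copy the result
+ * back into the caller's scatterlist if a bounce buffer was used, and
+ * complete the kpp request with the translated firmware status.
+ */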
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+	struct kpp_request *areq = req->areq.dh;
+	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+				resp->pke_resp_hdr.comn_resp_flags);
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (areq->src) {
+		if (req->src_align)
+			dma_free_coherent(dev, req->ctx.dh->p_size,
+					  req->src_align, req->in.dh.in.b);
+		else
+			dma_unmap_single(dev, req->in.dh.in.b,
+					 req->ctx.dh->p_size, DMA_TO_DEVICE);
+	}
+
+	areq->dst_len = req->ctx.dh->p_size;
+	if (req->dst_align) {
+		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+					 areq->dst_len, 1);
+
+		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+				  req->out.dh.r);
+	} else {
+		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+				 DMA_FROM_DEVICE);
+	}
+
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_dh_output_params),
+			 DMA_TO_DEVICE);
+
+	kpp_request_complete(areq, err);
+}
+
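+/*
+ * Firmware function IDs for the supported DH modulus sizes; the G2
+ * variants use the fixed generator g = 2.
+ */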
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 1536:
+		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+	case 2048:
+		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+	case 3072:
+		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+	case 4096:
+		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+	default:
+		return 0;
+	}
+}
+
+static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
+{
+	return kpp_tfm_ctx(tfm);
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(kpp_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+	int n_input_params = 0;
+
+	if (unlikely(!ctx->xa))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->p_size) {
+		req->dst_len = ctx->p_size;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+						    !req->src && ctx->g2);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_dh_cb;
+	qat_req->ctx.dh = ctx;
+	qat_req->areq.dh = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	/* If no source is provided, use g as the base */
+	if (req->src) {
+		qat_req->in.dh.in.xa = ctx->dma_xa;
+		qat_req->in.dh.in.p = ctx->dma_p;
+		n_input_params = 3;
+	} else {
+		if (ctx->g2) {
+			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+			qat_req->in.dh.in_g2.p = ctx->dma_p;
+			n_input_params = 2;
+		} else {
+			qat_req->in.dh.in.b = ctx->dma_g;
+			qat_req->in.dh.in.xa = ctx->dma_xa;
+			qat_req->in.dh.in.p = ctx->dma_p;
+			n_input_params = 3;
+		}
+	}
+
+	ret = -ENOMEM;
+	if (req->src) {
+		/*
+		 * src can be of any size in the valid range, but the HW
+		 * expects it to be the same size as the modulus p, so if
+		 * the sizes differ we need to allocate a new buffer and
+		 * copy the src data into it. Otherwise we just map the
+		 * user-provided buffer, which must also be contiguous.
+		 */
+		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+			qat_req->src_align = NULL;
+			qat_req->in.dh.in.b = dma_map_single(dev,
+							     sg_virt(req->src),
+							     req->src_len,
+							     DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(dev,
+						       qat_req->in.dh.in.b)))
+				return ret;
+
+		} else {
+			int shift = ctx->p_size - req->src_len;
+
+			qat_req->src_align = dma_zalloc_coherent(dev,
+								 ctx->p_size,
+								 &qat_req->in.dh.in.b,
+								 GFP_KERNEL);
+			if (unlikely(!qat_req->src_align))
+				return ret;
+
+			scatterwalk_map_and_copy(qat_req->src_align + shift,
+						 req->src, 0, req->src_len, 0);
+		}
+	}
+	/*
+	 * dst can be of any size in the valid range, but the HW expects it
+	 * to be the same size as the modulus p, so if the sizes differ we
+	 * need to allocate a new buffer and copy the result back from it in
+	 * qat_dh_cb(). Otherwise we just map the user-provided buffer,
+	 * which must also be contiguous.
+	 */
+	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+		qat_req->dst_align = NULL;
+		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+						   req->dst_len,
+						   DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+							 &qat_req->out.dh.r,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+	}
+
+	qat_req->in.dh.in_tab[n_input_params] = 0;
+	qat_req->out.dh.out_tab[1] = 0;
+	/* in.b and in_g2.xa overlap in the union; mapping either is the same */
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+					 sizeof(struct qat_dh_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+					  sizeof(struct qat_dh_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	msg->input_param_count = n_input_params;
+	msg->output_param_count = 1;
+
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_dh_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_dh_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+				  qat_req->out.dh.r);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.dh.r))
+			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+					 DMA_FROM_DEVICE);
+unmap_src:
+	if (req->src) {
+		if (qat_req->src_align)
+			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+					  qat_req->in.dh.in.b);
+		else
+			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+				dma_unmap_single(dev, qat_req->in.dh.in.b,
+						 ctx->p_size,
+						 DMA_TO_DEVICE);
+	}
+	return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+	switch (p_len) {
+	case 1536:
+	case 2048:
+	case 3072:
+	case 4096:
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+
+	if (unlikely(!params->p || !params->g))
+		return -EINVAL;
+
+	if (qat_dh_check_params_length(params->p_size << 3))
+		return -EINVAL;
+
+	ctx->p_size = params->p_size;
+	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+	if (!ctx->p)
+		return -ENOMEM;
+	memcpy(ctx->p, params->p, ctx->p_size);
+
+	/* If g equals 2 don't copy it */
+	if (params->g_size == 1 && *(char *)params->g == 0x02) {
+		ctx->g2 = true;
+		return 0;
+	}
+
+	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+	if (!ctx->g) {
+		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+		ctx->p = NULL;
+		return -ENOMEM;
+	}
+	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+	       params->g_size);
+
+	return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+	if (ctx->g) {
+		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+		ctx->g = NULL;
+	}
+	if (ctx->xa) {
+		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+		ctx->xa = NULL;
+	}
+	if (ctx->p) {
+		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+		ctx->p = NULL;
+	}
+	ctx->p_size = 0;
+	ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+			     unsigned int len)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	struct dh params;
+	int ret;
+
+	if (crypto_dh_decode_key(buf, len, &params) < 0)
+		return -EINVAL;
+
+	/* Free old secret if any */
+	qat_dh_clear_ctx(dev, ctx);
+
+	ret = qat_dh_set_params(ctx, &params);
+	if (ret < 0)
+		return ret;
+
+	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+				      GFP_KERNEL);
+	if (!ctx->xa) {
+		qat_dh_clear_ctx(dev, ctx);
+		return -ENOMEM;
+	}
+	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+	       params.key_size);
+
+	return 0;
+}
+
+static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	return ctx->p_size;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->p_size = 0;
+	ctx->g2 = false;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	qat_dh_clear_ctx(dev, ctx);
+	qat_crypto_put_instance(ctx->inst);
+}
+
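+/*
+ * RSA completion handler; mirrors qat_dh_cb() for the akcipher path.
+ */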
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+	struct akcipher_request *areq = req->areq.rsa;
+	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+				resp->pke_resp_hdr.comn_resp_flags);
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (req->src_align)
+		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+				  req->in.rsa.enc.m);
+	else
+		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+				 DMA_TO_DEVICE);
+
+	areq->dst_len = req->ctx.rsa->key_sz;
+	if (req->dst_align) {
+		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+					 areq->dst_len, 1);
+
+		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+				  req->out.rsa.enc.c);
+	} else {
+		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+				 DMA_FROM_DEVICE);
+	}
+
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_rsa_output_params),
+			 DMA_TO_DEVICE);
+
+	akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+	struct icp_qat_fw_pke_resp *resp = _resp;
+	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+
+	areq->cb(resp);
+}
+
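+/* Firmware function IDs for RSA public-key (encrypt) operations */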
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_EP_512;
+	case 1024:
+		return PKE_RSA_EP_1024;
+	case 1536:
+		return PKE_RSA_EP_1536;
+	case 2048:
+		return PKE_RSA_EP_2048;
+	case 3072:
+		return PKE_RSA_EP_3072;
+	case 4096:
+		return PKE_RSA_EP_4096;
+	default:
+		return 0;
+	}
+}
+
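+/* Firmware function IDs for RSA private-key (decrypt) operations */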
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_DP1_512;
+	case 1024:
+		return PKE_RSA_DP1_1024;
+	case 1536:
+		return PKE_RSA_DP1_1536;
+	case 2048:
+		return PKE_RSA_DP1_2048;
+	case 3072:
+		return PKE_RSA_DP1_3072;
+	case 4096:
+		return PKE_RSA_DP1_4096;
+	default:
+		return 0;
+	}
+}
+
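+/* Firmware function IDs for RSA CRT private-key operations */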
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_DP2_512;
+	case 1024:
+		return PKE_RSA_DP2_1024;
+	case 1536:
+		return PKE_RSA_DP2_1536;
+	case 2048:
+		return PKE_RSA_DP2_2048;
+	case 3072:
+		return PKE_RSA_DP2_3072;
+	case 4096:
+		return PKE_RSA_DP2_4096;
+	default:
+		return 0;
+	}
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->e))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_rsa_cb;
+	qat_req->ctx.rsa = ctx;
+	qat_req->areq.rsa = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	qat_req->in.rsa.enc.e = ctx->dma_e;
+	qat_req->in.rsa.enc.n = ctx->dma_n;
+	ret = -ENOMEM;
+
+	/*
+	 * src can be of any size in the valid range, but the HW expects it
+	 * to be the same size as the modulus n, so if the sizes differ we
+	 * need to allocate a new buffer and copy the src data into it.
+	 * Otherwise we just map the user-provided buffer, which must also
+	 * be contiguous.
+	 */
+	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+		qat_req->src_align = NULL;
+		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
+						   req->src_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+			return ret;
+
+	} else {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.rsa.enc.m,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+					 0, req->src_len, 0);
+	}
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
+							req->dst_len,
+							DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->out.rsa.enc.c,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+
+	}
+	qat_req->in.rsa.in_tab[3] = 0;
+	qat_req->out.rsa.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
+					  sizeof(struct qat_rsa_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	msg->input_param_count = 3;
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+				  qat_req->out.rsa.enc.c);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+					 ctx->key_sz, DMA_FROM_DEVICE);
+unmap_src:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.rsa.enc.m);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+					 ctx->key_sz, DMA_TO_DEVICE);
+	return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_asym_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->d))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+		qat_rsa_dec_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->cb = qat_rsa_cb;
+	qat_req->ctx.rsa = ctx;
+	qat_req->areq.rsa = req;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	if (ctx->crt_mode) {
+		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+	} else {
+		qat_req->in.rsa.dec.d = ctx->dma_d;
+		qat_req->in.rsa.dec.n = ctx->dma_n;
+	}
+	ret = -ENOMEM;
+
+	/*
+	 * src can be of any size in the valid range, but the HW expects it
+	 * to be the same size as the modulus n, so if the sizes differ we
+	 * need to allocate a new buffer and copy the src data into it.
+	 * Otherwise we just map the user-provided buffer, which must also
+	 * be contiguous.
+	 */
+	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+		qat_req->src_align = NULL;
+		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
+						   req->src_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+			return ret;
+
+	} else {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.rsa.dec.c,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+					 0, req->src_len, 0);
+	}
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
+						    req->dst_len,
+						    DMA_FROM_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+			goto unmap_src;
+
+	} else {
+		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->out.rsa.dec.m,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->dst_align))
+			goto unmap_src;
+
+	}
+
+	if (ctx->crt_mode)
+		qat_req->in.rsa.in_tab[6] = 0;
+	else
+		qat_req->in.rsa.in_tab[3] = 0;
+	qat_req->out.rsa.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+		goto unmap_dst;
+
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
+					  sizeof(struct qat_rsa_output_params),
+					  DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap_in_params;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+	if (ctx->crt_mode)
+		msg->input_param_count = 6;
+	else
+		msg->input_param_count = 3;
+
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+unmap_in_params:
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+unmap_dst:
+	if (qat_req->dst_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
+				  qat_req->out.rsa.dec.m);
+	else
+		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+					 ctx->key_sz, DMA_FROM_DEVICE);
+unmap_src:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.rsa.dec.c);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+					 ctx->key_sz, DMA_TO_DEVICE);
+	return ret;
+}
+
+int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ctx->key_sz = vlen;
+	ret = -EINVAL;
+	/* invalid key size provided */
+	if (!qat_rsa_enc_fn_id(ctx->key_sz))
+		goto err;
+
+	ret = -ENOMEM;
+	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+	if (!ctx->n)
+		goto err;
+
+	memcpy(ctx->n, ptr, ctx->key_sz);
+	return 0;
+err:
+	ctx->key_sz = 0;
+	ctx->n = NULL;
+	return ret;
+}
+
+int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+		ctx->e = NULL;
+		return -EINVAL;
+	}
+
+	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	if (!ctx->e)
+		return -ENOMEM;
+
+	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+}
+
+int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ret = -EINVAL;
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+		goto err;
+
+	ret = -ENOMEM;
+	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	if (!ctx->d)
+		goto err;
+
+	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+err:
+	ctx->d = NULL;
+	return ret;
+}
+
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
+{
+	while (!**ptr && *len) {
+		(*ptr)++;
+		(*len)--;
+	}
+}
+
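+/*
+ * Import the CRT components (p, q, dp, dq, qinv) into DMA-coherent
+ * buffers. On any failure the components allocated so far are scrubbed
+ * and freed and the context falls back to non-CRT mode.
+ */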
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr;
+	unsigned int len;
+	unsigned int half_key_sz = ctx->key_sz / 2;
+
+	/* p */
+	ptr = rsa_key->p;
+	len = rsa_key->p_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto err;
+	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+	if (!ctx->p)
+		goto err;
+	memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+	/* q */
+	ptr = rsa_key->q;
+	len = rsa_key->q_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_p;
+	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+	if (!ctx->q)
+		goto free_p;
+	memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+	/* dp */
+	ptr = rsa_key->dp;
+	len = rsa_key->dp_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_q;
+	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+				      GFP_KERNEL);
+	if (!ctx->dp)
+		goto free_q;
+	memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+	/* dq */
+	ptr = rsa_key->dq;
+	len = rsa_key->dq_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_dp;
+	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+				      GFP_KERNEL);
+	if (!ctx->dq)
+		goto free_dp;
+	memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+	/* qinv */
+	ptr = rsa_key->qinv;
+	len = rsa_key->qinv_sz;
+	qat_rsa_drop_leading_zeros(&ptr, &len);
+	if (!len)
+		goto free_dq;
+	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+					GFP_KERNEL);
+	if (!ctx->qinv)
+		goto free_dq;
+	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+	ctx->crt_mode = true;
+	return;
+
+free_dq:
+	memset(ctx->dq, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+	ctx->dq = NULL;
+free_dp:
+	memset(ctx->dp, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+	ctx->dp = NULL;
+free_q:
+	memset(ctx->q, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+	ctx->q = NULL;
+free_p:
+	memset(ctx->p, '\0', half_key_sz);
+	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+	ctx->p = NULL;
+err:
+	ctx->crt_mode = false;
+}
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+	unsigned int half_key_sz = ctx->key_sz / 2;
+
+	/* Free the old key if any */
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+	if (ctx->p) {
+		memset(ctx->p, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+	}
+	if (ctx->q) {
+		memset(ctx->q, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+	}
+	if (ctx->dp) {
+		memset(ctx->dp, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+	}
+	if (ctx->dq) {
+		memset(ctx->dq, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+	}
+	if (ctx->qinv) {
+		memset(ctx->qinv, '\0', half_key_sz);
+		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+	}
+
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+	ctx->p = NULL;
+	ctx->q = NULL;
+	ctx->dp = NULL;
+	ctx->dq = NULL;
+	ctx->qinv = NULL;
+	ctx->crt_mode = false;
+	ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen, bool private)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	struct rsa_key rsa_key;
+	int ret;
+
+	qat_rsa_clear_ctx(dev, ctx);
+
+	if (private)
+		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+	else
+		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+	if (ret < 0)
+		goto free;
+
+	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
+	if (ret < 0)
+		goto free;
+	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+	if (ret < 0)
+		goto free;
+	if (private) {
+		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+		if (ret < 0)
+			goto free;
+		qat_rsa_setkey_crt(ctx, &rsa_key);
+	}
+
+	if (!ctx->n || !ctx->e) {
+		/* invalid key provided */
+		ret = -EINVAL;
+		goto free;
+	}
+	if (private && !ctx->d) {
+		/* invalid private key provided */
+		ret = -EINVAL;
+		goto free;
+	}
+
+	return 0;
+free:
+	qat_rsa_clear_ctx(dev, ctx);
+	return ret;
+}
+
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+			     unsigned int keylen)
+{
+	return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+			      unsigned int keylen)
+{
+	return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	return ctx->key_sz;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->key_sz = 0;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+	qat_crypto_put_instance(ctx->inst);
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+}
+
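+/*
+ * The raw RSA primitives double as sign/verify: sign is the private-key
+ * operation (decrypt) and verify the public-key operation (encrypt);
+ * padding is expected to come from a wrapping template such as pkcs1pad.
+ */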
+static struct akcipher_alg rsa = {
+	.encrypt = qat_rsa_enc,
+	.decrypt = qat_rsa_dec,
+	.sign = qat_rsa_dec,
+	.verify = qat_rsa_enc,
+	.set_pub_key = qat_rsa_setpubkey,
+	.set_priv_key = qat_rsa_setprivkey,
+	.max_size = qat_rsa_max_size,
+	.init = qat_rsa_init_tfm,
+	.exit = qat_rsa_exit_tfm,
+	.reqsize = sizeof(struct qat_asym_request) + 64,
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "qat-rsa",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
+	},
+};
+
+static struct kpp_alg dh = {
+	.set_secret = qat_dh_set_secret,
+	.generate_public_key = qat_dh_compute_value,
+	.compute_shared_secret = qat_dh_compute_value,
+	.max_size = qat_dh_max_size,
+	.init = qat_dh_init_tfm,
+	.exit = qat_dh_exit_tfm,
+	.reqsize = sizeof(struct qat_asym_request) + 64,
+	.base = {
+		.cra_name = "dh",
+		.cra_driver_name = "qat-dh",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_dh_ctx),
+	},
+};
+
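+/*
+ * A minimal usage sketch (not part of this driver): once registered,
+ * "qat-rsa" is reachable through the generic akcipher API, e.g.:
+ *
+ *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
+ *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
+ *	akcipher_request_set_crypt(req, src_sgl, dst_sgl, src_len,
+ *				   crypto_akcipher_maxsize(tfm));
+ *	crypto_akcipher_encrypt(req);
+ *
+ * der_key, src_sgl and dst_sgl are illustrative placeholders; error
+ * handling and request completion are omitted.
+ */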
+int qat_asym_algs_register(void)
+{
+	int ret = 0;
+
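+	/* register the algorithms only once, on the first active device */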
+	mutex_lock(&algs_lock);
+	if (++active_devs == 1) {
+		rsa.base.cra_flags = 0;
+		ret = crypto_register_akcipher(&rsa);
+		if (ret)
+			goto unlock;
+		ret = crypto_register_kpp(&dh);
+	}
+unlock:
+	mutex_unlock(&algs_lock);
+	return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--active_devs == 0) {
+		crypto_unregister_akcipher(&rsa);
+		crypto_unregister_kpp(&dh);
+	}
+	mutex_unlock(&algs_lock);
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_crypto.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_crypto.c
new file mode 100644
index 0000000..3852d31
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -0,0 +1,370 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+	atomic_dec(&inst->refctr);
+	adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+	struct qat_crypto_instance *inst, *tmp;
+	int i;
+
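+	/* drop any references still held by transforms before freeing */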
+	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
+		for (i = 0; i < atomic_read(&inst->refctr); i++)
+			qat_crypto_put_instance(inst);
+
+		if (inst->sym_tx)
+			adf_remove_ring(inst->sym_tx);
+
+		if (inst->sym_rx)
+			adf_remove_ring(inst->sym_rx);
+
+		if (inst->pke_tx)
+			adf_remove_ring(inst->pke_tx);
+
+		if (inst->pke_rx)
+			adf_remove_ring(inst->pke_rx);
+
+		list_del(&inst->list);
+		kfree(inst);
+	}
+	return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
+	struct qat_crypto_instance *inst = NULL, *tmp_inst;
+	unsigned long best = ~0;
+
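+	/*
+	 * Prefer the least-referenced started device on the requested NUMA
+	 * node; devices without NUMA affinity (node < 0) also qualify.
+	 */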
+	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+		unsigned long ctr;
+
+		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+		    adf_dev_started(tmp_dev) &&
+		    !list_empty(&tmp_dev->crypto_list)) {
+			ctr = atomic_read(&tmp_dev->ref_count);
+			if (best > ctr) {
+				accel_dev = tmp_dev;
+				best = ctr;
+			}
+		}
+	}
+
+	if (!accel_dev) {
+		pr_info("QAT: Could not find a device on node %d\n", node);
+		/* Get any started device */
+		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+			if (adf_dev_started(tmp_dev) &&
+			    !list_empty(&tmp_dev->crypto_list)) {
+				accel_dev = tmp_dev;
+				break;
+			}
+		}
+	}
+
+	if (!accel_dev)
+		return NULL;
+
+	best = ~0;
+	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
+		unsigned long ctr;
+
+		ctr = atomic_read(&tmp_inst->refctr);
+		if (best > ctr) {
+			inst = tmp_inst;
+			best = ctr;
+		}
+	}
+	if (inst) {
+		if (adf_dev_get(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+			return NULL;
+		}
+		atomic_inc(&inst->refctr);
+	}
+	return inst;
+}
+
+/**
+ * qat_crypto_dev_config() - create dev config required to create crypto inst.
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create crypto instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+	int cpus = num_online_cpus();
+	int banks = GET_MAX_BANKS(accel_dev);
+	int instances = min(cpus, banks);
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	int i;
+	unsigned long val;
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		goto err;
+	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+		goto err;
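+	/*
+	 * Fixed ring numbering within each instance's bank:
+	 * asym tx = 0, sym tx = 2, asym rx = 8, sym rx = 10.
+	 */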
+	for (i = 0; i < instances; i++) {
+		val = i;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+			 i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		val = 128;
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 512;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 0;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 2;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 8;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = 10;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+						key, (void *)&val, ADF_DEC))
+			goto err;
+
+		val = ADF_COALESCING_DEF_TIME;
+		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+						key, (void *)&val, ADF_DEC))
+			goto err;
+	}
+
+	val = i;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	return 0;
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+	int i;
+	unsigned long bank;
+	unsigned long num_inst, num_msg_sym, num_msg_asym;
+	int msg_size;
+	struct qat_crypto_instance *inst;
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	strlcpy(key, ADF_NUM_CY, sizeof(key));
+	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+		return -EFAULT;
+
+	if (kstrtoul(val, 0, &num_inst))
+		return -EFAULT;
+
+	for (i = 0; i < num_inst; i++) {
+		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+				    dev_to_node(&GET_DEV(accel_dev)));
+		if (!inst)
+			goto err;
+
+		list_add_tail(&inst->list, &accel_dev->crypto_list);
+		inst->id = i;
+		atomic_set(&inst->refctr, 0);
+		inst->accel_dev = accel_dev;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &bank))
+			goto err;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &num_msg_sym))
+			goto err;
+
+		num_msg_sym = num_msg_sym >> 1;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+			goto err;
+
+		if (kstrtoul(val, 10, &num_msg_asym))
+			goto err;
+		num_msg_asym = num_msg_asym >> 1;
+
+		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+				    msg_size, key, NULL, 0, &inst->sym_tx))
+			goto err;
+
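+		/* PKE requests are half the size of the default bulk request */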
+		msg_size = msg_size >> 1;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, NULL, 0, &inst->pke_tx))
+			goto err;
+
+		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+				    msg_size, key, qat_alg_callback, 0,
+				    &inst->sym_rx))
+			goto err;
+
+		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+				    msg_size, key, qat_alg_asym_callback, 0,
+				    &inst->pke_rx))
+			goto err;
+	}
+	return 0;
+err:
+	qat_crypto_free_instances(accel_dev);
+	return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+	if (qat_crypto_create_instances(accel_dev))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+	return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+				    enum adf_event event)
+{
+	int ret;
+
+	switch (event) {
+	case ADF_EVENT_INIT:
+		ret = qat_crypto_init(accel_dev);
+		break;
+	case ADF_EVENT_SHUTDOWN:
+		ret = qat_crypto_shutdown(accel_dev);
+		break;
+	case ADF_EVENT_RESTARTING:
+	case ADF_EVENT_RESTARTED:
+	case ADF_EVENT_START:
+	case ADF_EVENT_STOP:
+	default:
+		ret = 0;
+	}
+	return ret;
+}
+
+int qat_crypto_register(void)
+{
+	memset(&qat_crypto, 0, sizeof(qat_crypto));
+	qat_crypto.event_hld = qat_crypto_event_handler;
+	qat_crypto.name = "qat_crypto";
+	return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+	return adf_service_unregister(&qat_crypto);
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_crypto.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_crypto.h
new file mode 100644
index 0000000..dc0273f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -0,0 +1,93 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+
+struct qat_crypto_instance {
+	struct adf_etr_ring_data *sym_tx;
+	struct adf_etr_ring_data *sym_rx;
+	struct adf_etr_ring_data *pke_tx;
+	struct adf_etr_ring_data *pke_rx;
+	struct adf_accel_dev *accel_dev;
+	struct list_head list;
+	unsigned long state;
+	int id;
+	atomic_t refctr;
+};
+
+struct qat_crypto_request_buffs {
+	struct qat_alg_buf_list *bl;
+	dma_addr_t blp;
+	struct qat_alg_buf_list *blout;
+	dma_addr_t bloutp;
+	size_t sz;
+	size_t sz_out;
+};
+
+struct qat_crypto_request;
+
+struct qat_crypto_request {
+	struct icp_qat_fw_la_bulk_req req;
+	union {
+		struct qat_alg_aead_ctx *aead_ctx;
+		struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+	};
+	union {
+		struct aead_request *aead_req;
+		struct ablkcipher_request *ablkcipher_req;
+	};
+	struct qat_crypto_request_buffs buf;
+	void (*cb)(struct icp_qat_fw_la_resp *resp,
+		   struct qat_crypto_request *req);
+};
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_hal.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_hal.c
new file mode 100644
index 0000000..8c4fd25
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_hal.c
@@ -0,0 +1,1440 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR	       0xffff
+#define MAX_RETRY_TIMES	   10000
+#define INIT_CTX_ARB_VALUE	0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE	     0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB	   20
+#define RST_CSR_AE_LSB		  0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
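+/*
+ * CTX_ENABLES contains write-1-to-clear status bits; mask them to zero
+ * so that read-modify-write updates do not inadvertently clear them.
+ */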
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+	(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+	(~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+	(inst = ((inst & 0xFFFF00C03FFull) | \
+		((((const_val) << 12) & 0x0FF00000ull) | \
+		(((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+	(inst = ((inst & 0xFFFF00FFF00ull) | \
+		((((const_val) << 12) & 0x0FF00000ull) | \
+		(((const_val) <<  0) & 0x000000FFull))))
+
+#define AE(handle, ae) handle->hal_handle->aes[ae]
+
+static const uint64_t inst_4b[] = {
+	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0A021000000ull
+};
+
+static const uint64_t inst[] = {
+	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+			  unsigned char ae, unsigned int ctx_mask)
+{
+	AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned int csr,
+			     unsigned int *value)
+{
+	unsigned int iterations = CSR_RETRY_TIMES;
+
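+	/* retry until LOCAL_CSR_STATUS reports the access completed */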
+	do {
+		*value = GET_AE_CSR(handle, ae, csr);
+		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+			return 0;
+	} while (iterations--);
+
+	pr_err("QAT: Read CSR timeout\n");
+	return -EFAULT;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned int csr,
+			     unsigned int value)
+{
+	unsigned int iterations = CSR_RETRY_TIMES;
+
+	do {
+		SET_AE_CSR(handle, ae, csr, value);
+		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+			return 0;
+	} while (iterations--);
+
+	pr_err("QAT: Write CSR Timeout\n");
+	return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+				     unsigned char ae, unsigned char ctx,
+				     unsigned int *events)
+{
+	unsigned int cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int cycles,
+			       int chk_inactive)
+{
+	unsigned int base_cnt = 0, cur_cnt = 0;
+	unsigned int csr = (1 << ACS_ABO_BITPOS);
+	int times = MAX_RETRY_TIMES;
+	int elapsed_cycles = 0;
+
+	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+	base_cnt &= 0xffff;
+	while ((int)cycles > elapsed_cycles && times--) {
+		if (chk_inactive)
+			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+
+		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+		cur_cnt &= 0xffff;
+		elapsed_cycles = cur_cnt - base_cnt;
+
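+		/* PROFILE_COUNT is 16 bits wide; correct for wraparound */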
+		if (elapsed_cycles < 0)
+			elapsed_cycles += 0x10000;
+
+		/* ensure at least 8 time cycles have elapsed in wait_cycles */
+		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+			return 0;
+	}
+	if (times < 0) {
+		pr_err("QAT: wait_num_cycles timed out\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
+#define SET_BIT(wrd, bit) (wrd | 1 << bit)
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+			    unsigned char ae, unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	if ((mode != 4) && (mode != 8)) {
+		pr_err("QAT: bad ctx mode=%d\n", mode);
+		return -EINVAL;
+	}
+
+	/* Set the acceleration engine context mode to either four or eight contexts */
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+	csr = IGNORE_W1C_MASK & csr;
+	new_csr = (mode == 4) ?
+		SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+		CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+	return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+	csr &= IGNORE_W1C_MASK;
+
+	new_csr = (mode) ?
+		SET_BIT(csr, CE_NN_MODE_BITPOS) :
+		CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+	if (new_csr != csr)
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+	return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+			   unsigned char ae, enum icp_qat_uof_regtype lm_type,
+			   unsigned char mode)
+{
+	unsigned int csr, new_csr;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+	csr &= IGNORE_W1C_MASK;
+	switch (lm_type) {
+	case ICP_LMEM0:
+		new_csr = (mode) ?
+			SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+			CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+		break;
+	case ICP_LMEM1:
+		new_csr = (mode) ?
+			SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+			CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+		break;
+	default:
+		pr_err("QAT: invalid lmType = 0x%x\n", lm_type);
+		return -EINVAL;
+	}
+
+	if (new_csr != csr)
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+	return 0;
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+					   unsigned short reg_num)
+{
+	unsigned short reg_addr;
+
+	switch (type) {
+	case ICP_GPA_ABS:
+	case ICP_GPB_ABS:
+		reg_addr = 0x80 | (reg_num & 0x7f);
+		break;
+	case ICP_GPA_REL:
+	case ICP_GPB_REL:
+		reg_addr = reg_num & 0x1f;
+		break;
+	case ICP_SR_RD_REL:
+	case ICP_SR_WR_REL:
+	case ICP_SR_REL:
+		reg_addr = 0x180 | (reg_num & 0x1f);
+		break;
+	case ICP_SR_ABS:
+		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+		break;
+	case ICP_DR_RD_REL:
+	case ICP_DR_WR_REL:
+	case ICP_DR_REL:
+		reg_addr = 0x1c0 | (reg_num & 0x1f);
+		break;
+	case ICP_DR_ABS:
+		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+		break;
+	case ICP_NEIGH_REL:
+		reg_addr = 0x280 | (reg_num & 0x1f);
+		break;
+	case ICP_LMEM0:
+		reg_addr = 0x200;
+		break;
+	case ICP_LMEM1:
+		reg_addr = 0x220;
+		break;
+	case ICP_NO_DEST:
+		reg_addr = 0x300 | (reg_num & 0xff);
+		break;
+	default:
+		reg_addr = BAD_REGADDR;
+		break;
+	}
+	return reg_addr;
+}
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int ae_reset_csr;
+
+	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+	ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
+	ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
+	SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+}
+
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned int ctx_mask,
+				unsigned int ae_csr, unsigned int csr_val)
+{
+	unsigned int ctx, cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+	}
+
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned char ctx,
+				unsigned int ae_csr, unsigned int *csr_val)
+{
+	unsigned int cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+				  unsigned char ae, unsigned int ctx_mask,
+				  unsigned int events)
+{
+	unsigned int ctx, cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+	}
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+				     unsigned char ae, unsigned int ctx_mask,
+				     unsigned int events)
+{
+	unsigned int ctx, cur_ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!(ctx_mask & (1 << ctx)))
+			continue;
+		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+		qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+				  events);
+	}
+	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int base_cnt, cur_cnt;
+	unsigned char ae;
+	int times = MAX_RETRY_TIMES;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+		base_cnt &= 0xffff;
+
+		do {
+			qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+			cur_cnt &= 0xffff;
+		} while (times-- && (cur_cnt == base_cnt));
+
+		if (times < 0) {
+			pr_err("QAT: AE%d is inactive\n", ae);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+			    unsigned int ae)
+{
+	unsigned int enable = 0, active = 0;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
+	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+	if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
+	    (active & (1 << ACS_ABO_BITPOS)))
+		return 1;
+
+	return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int misc_ctl;
+	unsigned char ae;
+
+	/* stop the timestamp timers */
+	misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
+	if (misc_ctl & MC_TIMESTAMP_ENABLE)
+		SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
+			    (~MC_TIMESTAMP_ENABLE));
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+	}
+	/* start timestamp timers */
+	SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT	BIT(2)
+#define ESRAM_AUTO_TINIT_DONE	BIT(3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+	void __iomem *csr_addr =
+			(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
+			ESRAM_AUTO_INIT_CSR_OFFSET);
+	unsigned int csr_val;
+	int times = 30;
+
+	if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
+		return 0;
+
+	csr_val = ADF_CSR_RD(csr_addr, 0);
+	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+		return 0;
+
+	csr_val = ADF_CSR_RD(csr_addr, 0);
+	csr_val |= ESRAM_AUTO_TINIT;
+	ADF_CSR_WR(csr_addr, 0, csr_val);
+
+	do {
+		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+		csr_val = ADF_CSR_RD(csr_addr, 0);
+	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+	if (times < 0) {
+		pr_err("QAT: failed to init eSRAM\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int ae_reset_csr;
+	unsigned char ae;
+	unsigned int clk_csr;
+	unsigned int times = 100;
+	unsigned int csr;
+
+	/* write to the reset csr */
+	ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+	ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
+	ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
+	do {
+		SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+		if (!(times--))
+			goto out_err;
+		csr = GET_GLB_CSR(handle, ICP_RESET);
+	} while ((handle->hal_handle->ae_mask |
+		 (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
+	/* enable clock */
+	clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
+	clk_csr |= handle->hal_handle->ae_mask << 0;
+	clk_csr |= handle->hal_handle->slice_mask << 20;
+	SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
+	if (qat_hal_check_ae_alive(handle))
+		goto out_err;
+
+	/* Set undefined power-up/reset states to reasonable default values */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+				  INIT_CTX_ENABLE_VALUE);
+		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+				    CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+		qat_hal_put_wakeup_event(handle, ae,
+					 ICP_QAT_UCLO_AE_ALL_CTX,
+					 INIT_WAKEUP_EVENTS_VALUE);
+		qat_hal_put_sig_event(handle, ae,
+				      ICP_QAT_UCLO_AE_ALL_CTX,
+				      INIT_SIG_EVENTS_VALUE);
+	}
+	if (qat_hal_init_esram(handle))
+		goto out_err;
+	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+		goto out_err;
+	qat_hal_reset_timestamp(handle);
+
+	return 0;
+out_err:
+	pr_err("QAT: failed to get device out of reset\n");
+	return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+				unsigned char ae, unsigned int ctx_mask)
+{
+	unsigned int ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+	ctx &= IGNORE_W1C_MASK &
+		(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static uint64_t qat_hal_parity_64bit(uint64_t word)
+{
+	word ^= word >> 1;
+	word ^= word >> 2;
+	word ^= word >> 4;
+	word ^= word >> 8;
+	word ^= word >> 16;
+	word ^= word >> 32;
+	return word & 1;
+}
+
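+/*
+ * Microstore words carry seven ECC check bits in bits 44-50; recompute
+ * them from the fixed parity masks whenever a word is (re)written.
+ */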
+static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+{
+	uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+		bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+		bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+		bit6_mask = 0xdaf69a46910ULL;
+
+	/* clear the ecc bits */
+	uword &= ~(0x7fULL << 0x2C);
+	uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+	uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+	uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+	uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+	uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+	uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+	uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+	return uword;
+}
+
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+		       unsigned char ae, unsigned int uaddr,
+		       unsigned int words_num, uint64_t *uword)
+{
+	unsigned int ustore_addr;
+	unsigned int i;
+
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr |= UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < words_num; i++) {
+		unsigned int uwrd_lo, uwrd_hi;
+		uint64_t tmp;
+
+		tmp = qat_hal_set_uword_ecc(uword[i]);
+		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+		uwrd_hi = (unsigned int)(tmp >> 0x20);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	}
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int ctx_mask)
+{
+	unsigned int ctx;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+	ctx &= IGNORE_W1C_MASK;
+	ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+	ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae;
+	unsigned short reg;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+					     reg, 0);
+			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+					     reg, 0);
+		}
+	}
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae;
+	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+	int times = MAX_RETRY_TIMES;
+	unsigned int csr_val = 0;
+	unsigned int savctx = 0;
+	int ret = 0;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+		csr_val &= IGNORE_W1C_MASK;
+		csr_val |= CE_NN_MODE;
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+				  (uint64_t *)inst);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+				    CTX_SIG_EVENTS_INDIRECT, 0);
+		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+		qat_hal_enable_ctx(handle, ae, ctx_mask);
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		/* wait for AE to finish */
+		do {
+			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+		} while (ret && times--);
+
+		if (times < 0) {
+			pr_err("QAT: clear GPR of AE %d failed\n", ae);
+			return -EINVAL;
+		}
+		qat_hal_disable_ctx(handle, ae, ctx_mask);
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  savctx & ACS_ACNO);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+				  INIT_CTX_ENABLE_VALUE);
+		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+				    handle->hal_handle->upc_mask &
+				    INIT_PC_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+					 INIT_WAKEUP_EVENTS_VALUE);
+		qat_hal_put_sig_event(handle, ae, ctx_mask,
+				      INIT_SIG_EVENTS_VALUE);
+	}
+	return 0;
+}
+
+#define ICP_QAT_AE_OFFSET	0x20000
+#define ICP_QAT_CAP_OFFSET       (ICP_QAT_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET    0x800
+#define ICP_QAT_EP_OFFSET	0x3a000
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+	unsigned char ae;
+	unsigned int max_en_ae_id = 0;
+	struct icp_qat_fw_loader_handle *handle;
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *misc_bar =
+			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+	struct adf_bar *sram_bar;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->hal_cap_g_ctl_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_CAP_OFFSET);
+	handle->hal_cap_ae_xfer_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_AE_OFFSET);
+	handle->hal_ep_csr_addr_v =
+		(void __iomem *)((uintptr_t)misc_bar->virt_addr +
+				 ICP_QAT_EP_OFFSET);
+	handle->hal_cap_ae_local_csr_addr_v =
+		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
+				 LOCAL_TO_XFER_REG_OFFSET);
+	handle->pci_dev = pci_info->pci_dev;
+	if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
+		sram_bar =
+			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
+		handle->hal_sram_addr_v = sram_bar->virt_addr;
+	}
+	handle->fw_auth = (handle->pci_dev->device !=
+			   ADF_DH895XCC_PCI_DEVICE_ID);
+	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+	if (!handle->hal_handle)
+		goto out_hal_handle;
+	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+	handle->hal_handle->ae_mask = hw_data->ae_mask;
+	handle->hal_handle->slice_mask = hw_data->accel_mask;
+	/* create AE objects */
+	handle->hal_handle->upc_mask = 0x1ffff;
+	handle->hal_handle->max_ustore = 0x4000;
+	for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
+		if (!(hw_data->ae_mask & (1 << ae)))
+			continue;
+		handle->hal_handle->aes[ae].free_addr = 0;
+		handle->hal_handle->aes[ae].free_size =
+		    handle->hal_handle->max_ustore;
+		handle->hal_handle->aes[ae].ustore_size =
+		    handle->hal_handle->max_ustore;
+		handle->hal_handle->aes[ae].live_ctx_mask =
+						ICP_QAT_UCLO_AE_ALL_CTX;
+		max_en_ae_id = ae;
+	}
+	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+	/* take all AEs out of reset */
+	if (qat_hal_clr_reset(handle)) {
+		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
+		goto out_err;
+	}
+	qat_hal_clear_xfer(handle);
+	if (!handle->fw_auth) {
+		if (qat_hal_clear_gpr(handle))
+			goto out_err;
+	}
+
+	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		unsigned int csr_val = 0;
+
+		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+		csr_val |= 0x1;
+		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+	}
+	accel_dev->fw_loader->fw_loader = handle;
+	return 0;
+
+out_err:
+	kfree(handle->hal_handle);
+out_hal_handle:
+	kfree(handle);
+	return -EFAULT;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+	if (!handle)
+		return;
+	kfree(handle->hal_handle);
+	kfree(handle);
+}
+
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		   unsigned int ctx_mask)
+{
+	int retry = 0;
+	unsigned int fcu_sts = 0;
+
+	if (handle->fw_auth) {
+		SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START);
+		do {
+			msleep(FW_AUTH_WAIT_PERIOD);
+			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
+				return;
+		} while (retry++ < FW_AUTH_MAX_RETRY);
+		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae,
+		       fcu_sts);
+	} else {
+		qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+				 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
+		qat_hal_enable_ctx(handle, ae, ctx_mask);
+	}
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+		  unsigned int ctx_mask)
+{
+	if (!handle->fw_auth)
+		qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+	qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+			    handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+			       unsigned char ae, unsigned int uaddr,
+			       unsigned int words_num, uint64_t *uword)
+{
+	unsigned int i, uwrd_lo, uwrd_hi;
+	unsigned int ustore_addr, misc_control;
+
+	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+			  misc_control & 0xfffffffb);
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr |= UA_ECS;
+	for (i = 0; i < words_num; i++) {
+		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+		uaddr++;
+		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
+		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+		uword[i] = uwrd_hi;
+		uword[i] = (uword[i] << 0x20) | uwrd_lo;
+	}
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned int uaddr,
+		     unsigned int words_num, unsigned int *data)
+{
+	unsigned int i, ustore_addr;
+
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr |= UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	for (i = 0; i < words_num; i++) {
+		unsigned int uwrd_lo, uwrd_hi, tmp;
+
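+		/*
+		 * Scatter the 32-bit value across the ustore word format and
+		 * add a parity bit for each 16-bit half.
+		 */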
+		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+			  ((data[i] & 0xff00) << 2) |
+			  (0x3 << 8) | (data[i] & 0xff);
+		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+		uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+		tmp = ((data[i] >> 0x10) & 0xffff);
+		uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	}
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   uint64_t *micro_inst, unsigned int inst_num,
+				   int code_off, unsigned int max_cycle,
+				   unsigned int *endpc)
+{
+	uint64_t savuwords[MAX_EXEC_INST];
+	unsigned int ind_lm_addr0, ind_lm_addr1;
+	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
+	unsigned int ind_cnt_sig;
+	unsigned int ind_sig, act_sig;
+	unsigned int csr_val = 0, newcsr_val;
+	unsigned int savctx;
+	unsigned int savcc, wakeup_events, savpc;
+	unsigned int ctxarb_ctl, ctx_enables;
+
+	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+		pr_err("QAT: invalid instruction num %d\n", inst_num);
+		return -EINVAL;
+	}
+	/* save current context */
+	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
+	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
+	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+			    &ind_lm_addr_byte0);
+	qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+			    &ind_lm_addr_byte1);
+	if (inst_num <= MAX_EXEC_INST)
+		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+	savpc &= handle->hal_handle->upc_mask;
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables &= IGNORE_W1C_MASK;
+	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
+	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
+	qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+			    &ind_cnt_sig);
+	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
+	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+	/* execute micro codes */
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+	if (code_off)
+		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+	qat_hal_enable_ctx(handle, ae, (1 << ctx));
+	/* wait for micro codes to finish */
+	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+		return -EFAULT;
+	if (endpc) {
+		unsigned int ctx_status;
+
+		qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
+				    &ctx_status);
+		*endpc = ctx_status & handle->hal_handle->upc_mask;
+	}
+	/* restore to saved context */
+	qat_hal_disable_ctx(handle, ae, (1 << ctx));
+	if (inst_num <= MAX_EXEC_INST)
+		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+			    handle->hal_handle->upc_mask & savpc);
+	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
+	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+	return 0;
+}
+
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      enum icp_qat_uof_regtype reg_type,
+			      unsigned short reg_num, unsigned int *data)
+{
+	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+	unsigned short reg_addr;
+	int status = 0;
+	uint64_t insts, savuword;
+
+	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (reg_addr == BAD_REGADDR) {
+		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+		return -EINVAL;
+	}
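+	/* build a one-word ALU move so the register value lands in ALU_OUT */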
+	switch (reg_type) {
+	case ICP_GPA_REL:
+		insts = 0xA070000000ull | (reg_addr & 0x3ff);
+		break;
+	default:
+		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+		break;
+	}
+	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables &= IGNORE_W1C_MASK;
+	if (ctx != (savctx & ACS_ACNO))
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  ctx & ACS_ACNO);
+	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+	uaddr = UA_ECS;
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	insts = qat_hal_set_uword_ecc(insts);
+	uwrd_lo = (unsigned int)(insts & 0xffffffff);
+	uwrd_hi = (unsigned int)(insts >> 0x20);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+	/* delay for at least 8 cycles */
+	qat_hal_wait_cycles(handle, ae, 0x8, 0);
+	/*
+	 * Read the ALU output; the instruction should have been executed
+	 * before the ECS bit is cleared by the USTORE_ADDRESS restore below.
+	 */
+	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+	if (ctx != (savctx & ACS_ACNO))
+		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+				  savctx & ACS_ACNO);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+	return status;
+}
+
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      enum icp_qat_uof_regtype reg_type,
+			      unsigned short reg_num, unsigned int data)
+{
+	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+	uint64_t insts[] = {
+		0x0F440000000ull,
+		0x0F040000000ull,
+		0x0F0000C0300ull,
+		0x0E000010000ull
+	};
+	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+	const int imm_w1 = 0, imm_w0 = 1;
+
+	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (dest_addr == BAD_REGADDR) {
+		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+		return -EINVAL;
+	}
+
+	data16lo = 0xffff & data;
+	data16hi = 0xffff & (data >> 0x10);
+	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+					  (0xff & data16hi));
+	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+					   (0xff & data16lo));
+	switch (reg_type) {
+	case ICP_GPA_REL:
+		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+		break;
+	default:
+		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+		break;
+	}
+
+	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+				       code_off, num_inst * 0x5, NULL);
+}
+
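+/* Number of microwords each batch LM init entry contributes. */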
+int qat_hal_get_ins_num(void)
+{
+	return ARRAY_SIZE(inst_4b);
+}
+
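+/*
+ * Append one local-memory store sequence (a copy of the inst_4b
+ * template) to the microcode buffer, patching the immediate fields
+ * with the target LM address and the 32-bit value; returns the number
+ * of instructions added.
+ */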
+static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+				     unsigned int inst_num, unsigned int size,
+				     unsigned int addr, unsigned int *value)
+{
+	int i;
+	unsigned int cur_value;
+	const uint64_t *inst_arr;
+	int fixup_offset;
+	int usize = 0;
+	int orig_num;
+
+	orig_num = inst_num;
+	cur_value = value[0];
+	inst_arr = inst_4b;
+	usize = ARRAY_SIZE(inst_4b);
+	fixup_offset = inst_num;
+	for (i = 0; i < usize; i++)
+		micro_inst[inst_num++] = inst_arr[i];
+	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+	fixup_offset++;
+	INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+	fixup_offset++;
+	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+	fixup_offset++;
+	INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+	return inst_num - orig_num;
+}
+
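+/*
+ * Run the generated LM init microcode on the AE.  The GPRs the
+ * generated code clobbers are saved on the first execution and written
+ * back after each run.
+ */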
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+				      unsigned char ae, unsigned char ctx,
+				      int *pfirst_exec, uint64_t *micro_inst,
+				      unsigned int inst_num)
+{
+	int stat = 0;
+	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+	unsigned int gprb0 = 0, gprb1 = 0;
+
+	if (*pfirst_exec) {
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+		*pfirst_exec = 0;
+	}
+	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+				       inst_num * 0x5, NULL);
+	if (stat != 0)
+		return -EFAULT;
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+	return 0;
+}
+
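+/*
+ * Batch-initialise an AE's local memory: walk the init list,
+ * concatenate one store sequence per entry into a single microcode
+ * program, append a terminating instruction and execute it on ctx 0.
+ */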
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+			unsigned char ae,
+			struct icp_qat_uof_batch_init *lm_init_header)
+{
+	struct icp_qat_uof_batch_init *plm_init;
+	uint64_t *micro_inst_arry;
+	int micro_inst_num;
+	int alloc_inst_size;
+	int first_exec = 1;
+	int stat = 0;
+
+	plm_init = lm_init_header->next;
+	alloc_inst_size = lm_init_header->size;
+	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+		alloc_inst_size = handle->hal_handle->max_ustore;
+	micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!micro_inst_arry)
+		return -ENOMEM;
+	micro_inst_num = 0;
+	while (plm_init) {
+		unsigned int addr, *value, size;
+
+		ae = plm_init->ae;
+		addr = plm_init->addr;
+		value = plm_init->value;
+		size = plm_init->size;
+		micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+							    micro_inst_num,
+							    size, addr, value);
+		plm_init = plm_init->next;
+	}
+	/* exec micro codes */
+	if (micro_inst_arry && (micro_inst_num > 0)) {
+		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+		stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+						  micro_inst_arry,
+						  micro_inst_num);
+	}
+	kfree(micro_inst_arry);
+	return stat;
+}
+
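+/*
+ * Write a read-transfer register directly through the AE's xfer CSR
+ * window.  In 4-context mode only even contexts are valid and the
+ * register bank doubles in size, hence the wider mask and the 0x20
+ * D-side offset.
+ */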
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   enum icp_qat_uof_regtype reg_type,
+				   unsigned short reg_num, unsigned int val)
+{
+	int status = 0;
+	unsigned int reg_addr;
+	unsigned int ctx_enables;
+	unsigned short mask;
+	unsigned short dr_offset = 0x10;
+
+	status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	if (CE_INUSE_CONTEXTS & ctx_enables) {
+		if (ctx & 0x1) {
+			pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
+			return -EINVAL;
+		}
+		mask = 0x1f;
+		dr_offset = 0x20;
+	} else {
+		mask = 0x0f;
+	}
+	if (reg_num & ~mask)
+		return -EINVAL;
+	reg_addr = reg_num + (ctx << 0x5);
+	switch (reg_type) {
+	case ICP_SR_RD_REL:
+	case ICP_SR_REL:
+		SET_AE_XFER(handle, ae, reg_addr, val);
+		break;
+	case ICP_DR_RD_REL:
+	case ICP_DR_REL:
+		SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+		break;
+	default:
+		status = -EINVAL;
+		break;
+	}
+	return status;
+}
+
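+/*
+ * Write a write-transfer register: the value is staged in GPR b0 by
+ * two immediate instructions and then moved to the transfer register
+ * by an ALU copy; the original GPR contents are restored afterwards.
+ */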
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae, unsigned char ctx,
+				   enum icp_qat_uof_regtype reg_type,
+				   unsigned short reg_num, unsigned int data)
+{
+	unsigned int gprval, ctx_enables;
+	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+	    data16low;
+	unsigned short reg_mask;
+	int status = 0;
+	uint64_t micro_inst[] = {
+		0x0F440000000ull,
+		0x0F040000000ull,
+		0x0A000000000ull,
+		0x0F0000C0300ull,
+		0x0E000010000ull
+	};
+	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+	const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	if (CE_INUSE_CONTEXTS & ctx_enables) {
+		if (ctx & 0x1) {
+			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
+			return -EINVAL;
+		}
+		reg_mask = (unsigned short)~0x1f;
+	} else {
+		reg_mask = (unsigned short)~0xf;
+	}
+	if (reg_num & reg_mask)
+		return -EINVAL;
+	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+	if (xfr_addr == BAD_REGADDR) {
+		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+		return -EINVAL;
+	}
+	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+	data16low = 0xffff & data;
+	data16hi = 0xffff & (data >> 0x10);
+	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+					  (unsigned short)(0xff & data16hi));
+	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+					   (unsigned short)(0xff & data16low));
+	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+	micro_inst[0x2] = micro_inst[0x2] |
+	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+					 code_off, dly, NULL);
+	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+	return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+			      unsigned char ae, unsigned char ctx,
+			      unsigned short nn, unsigned int val)
+{
+	unsigned int ctx_enables;
+	int stat = 0;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables &= IGNORE_W1C_MASK;
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+	stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+	return stat;
+}
+
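+/*
+ * Convert an absolute register number into the equivalent
+ * context-relative register number plus owning context.  In 4-context
+ * mode only even contexts exist, so bit 0 of the context is masked
+ * off.
+ */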
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+				      *handle, unsigned char ae,
+				      unsigned short absreg_num,
+				      unsigned short *relreg,
+				      unsigned char *ctx)
+{
+	unsigned int ctx_enables;
+
+	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	if (ctx_enables & CE_INUSE_CONTEXTS) {
+		/* 4-ctx mode */
+		*relreg = absreg_num & 0x1F;
+		*ctx = (absreg_num >> 0x4) & 0x6;
+	} else {
+		/* 8-ctx mode */
+		*relreg = absreg_num & 0x0F;
+		*ctx = (absreg_num >> 0x4) & 0x7;
+	}
+	return 0;
+}
+
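+/*
+ * Initialise a GPR.  A zero ctx_mask selects absolute addressing: the
+ * register number is converted to a relative register and owning
+ * context (reg_type - 1 apparently maps the ABS enum onto its REL
+ * counterpart) and written once; otherwise the value is written to
+ * every context set in the mask.
+ */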
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+		     unsigned char ae, unsigned char ctx_mask,
+		     enum icp_qat_uof_regtype reg_type,
+		     unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 1;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+		if (stat) {
+			pr_err("QAT: write gpr fail\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 3;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+					       regdata);
+		if (stat) {
+			pr_err("QAT: write wr xfer fail\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+			 unsigned char ae, unsigned char ctx_mask,
+			 enum icp_qat_uof_regtype reg_type,
+			 unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned short reg;
+	unsigned char ctx = 0;
+	enum icp_qat_uof_regtype type;
+
+	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+		return -EINVAL;
+
+	do {
+		if (ctx_mask == 0) {
+			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+						   &ctx);
+			type = reg_type - 3;
+		} else {
+			reg = reg_num;
+			type = reg_type;
+			if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+				continue;
+		}
+		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+					       regdata);
+		if (stat) {
+			pr_err("QAT: write rd xfer fail\n");
+			return -EINVAL;
+		}
+	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+	return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+		    unsigned char ae, unsigned char ctx_mask,
+		    unsigned short reg_num, unsigned int regdata)
+{
+	int stat = 0;
+	unsigned char ctx;
+
+	if (ctx_mask == 0)
+		return -EINVAL;
+
+	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+		if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+			continue;
+		stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+		if (stat) {
+			pr_err("QAT: write neigh error\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_uclo.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_uclo.c
new file mode 100644
index 0000000..4f1cd83
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -0,0 +1,1674 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+				 unsigned int ae, unsigned int image_num)
+{
+	struct icp_qat_uclo_aedata *ae_data;
+	struct icp_qat_uclo_encapme *encap_image;
+	struct icp_qat_uclo_page *page = NULL;
+	struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+	ae_data = &obj_handle->ae_data[ae];
+	encap_image = &obj_handle->ae_uimage[image_num];
+	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+	ae_slice->encap_image = encap_image;
+
+	if (encap_image->img_ptr) {
+		ae_slice->ctx_mask_assigned =
+					encap_image->img_ptr->ctx_assigned;
+		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+	} else {
+		ae_slice->ctx_mask_assigned = 0;
+	}
+	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+	if (!ae_slice->region)
+		return -ENOMEM;
+	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+	if (!ae_slice->page)
+		goto out_err;
+	page = ae_slice->page;
+	page->encap_page = encap_image->page;
+	ae_slice->page->region = ae_slice->region;
+	ae_data->slice_num++;
+	return 0;
+out_err:
+	kfree(ae_slice->region);
+	ae_slice->region = NULL;
+	return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+	unsigned int i;
+
+	if (!ae_data) {
+		pr_err("QAT: bad argument, ae_data is NULL\n ");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ae_data->slice_num; i++) {
+		kfree(ae_data->ae_slices[i].region);
+		ae_data->ae_slices[i].region = NULL;
+		kfree(ae_data->ae_slices[i].page);
+		ae_data->ae_slices[i].page = NULL;
+	}
+	return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+				 unsigned int str_offset)
+{
+	if ((!str_table->table_len) || (str_offset > str_table->table_len))
+		return NULL;
+	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
+{
+	int maj = hdr->maj_ver & 0xff;
+	int min = hdr->min_ver & 0xff;
+
+	if (hdr->file_id != ICP_QAT_UOF_FID) {
+		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+		return -EINVAL;
+	}
+	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+		       maj, min);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
+{
+	int maj = suof_hdr->maj_ver & 0xff;
+	int min = suof_hdr->min_ver & 0xff;
+
+	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
+		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+		return -EINVAL;
+	}
+	if (suof_hdr->fw_type != 0) {
+		pr_err("QAT: unsupported firmware type\n");
+		return -EINVAL;
+	}
+	if (suof_hdr->num_chunks <= 0x1) {
+		pr_err("QAT: SUOF chunk amount is incorrect\n");
+		return -EINVAL;
+	}
+	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
+		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
+		       maj, min);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+				      unsigned int addr, unsigned int *val,
+				      unsigned int num_in_bytes)
+{
+	unsigned int outval;
+	unsigned char *ptr = (unsigned char *)val;
+
+	while (num_in_bytes) {
+		memcpy(&outval, ptr, 4);
+		SRAM_WRITE(handle, addr, outval);
+		num_in_bytes -= 4;
+		ptr += 4;
+		addr += 4;
+	}
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+				      unsigned char ae, unsigned int addr,
+				      unsigned int *val,
+				      unsigned int num_in_bytes)
+{
+	unsigned int outval;
+	unsigned char *ptr = (unsigned char *)val;
+
+	addr >>= 0x2; /* convert to uword address */
+
+	while (num_in_bytes) {
+		memcpy(&outval, ptr, 4);
+		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+		num_in_bytes -= 4;
+		ptr += 4;
+	}
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+				   unsigned char ae,
+				   struct icp_qat_uof_batch_init
+				   *umem_init_header)
+{
+	struct icp_qat_uof_batch_init *umem_init;
+
+	if (!umem_init_header)
+		return;
+	umem_init = umem_init_header->next;
+	while (umem_init) {
+		unsigned int addr, *value, size;
+
+		ae = umem_init->ae;
+		addr = umem_init->addr;
+		value = umem_init->value;
+		size = umem_init->size;
+		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+		umem_init = umem_init->next;
+	}
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+				 struct icp_qat_uof_batch_init **base)
+{
+	struct icp_qat_uof_batch_init *umem_init;
+
+	umem_init = *base;
+	while (umem_init) {
+		struct icp_qat_uof_batch_init *pre;
+
+		pre = umem_init;
+		umem_init = umem_init->next;
+		kfree(pre);
+	}
+	*base = NULL;
+}
+
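+/*
+ * Parse the leading decimal digits of a UOF symbol name (the AE
+ * number) into *num.
+ */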
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+	char buf[16] = {0};
+	unsigned long ae = 0;
+	int i;
+
+	strncpy(buf, str, 15);
+	for (i = 0; i < 16; i++) {
+		if (!isdigit(buf[i])) {
+			buf[i] = '\0';
+			break;
+		}
+	}
+	if (kstrtoul(buf, 10, &ae))
+		return -EFAULT;
+
+	*num = (unsigned int)ae;
+	return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+				     struct icp_qat_uof_initmem *init_mem,
+				     unsigned int size_range, unsigned int *ae)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	char *str;
+
+	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+		pr_err("QAT: initmem is out of range");
+		return -EINVAL;
+	}
+	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+		pr_err("QAT: Memory scope for init_mem error\n");
+		return -EINVAL;
+	}
+	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+	if (!str) {
+		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+		return -EINVAL;
+	}
+	if (qat_uclo_parse_num(str, ae)) {
+		pr_err("QAT: Parse num for AE number failed\n");
+		return -EINVAL;
+	}
+	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+		pr_err("QAT: ae %d out of range\n", *ae);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+					   *handle, struct icp_qat_uof_initmem
+					   *init_mem, unsigned int ae,
+					   struct icp_qat_uof_batch_init
+					   **init_tab_base)
+{
+	struct icp_qat_uof_batch_init *init_header, *tail;
+	struct icp_qat_uof_batch_init *mem_init, *tail_old;
+	struct icp_qat_uof_memvar_attr *mem_val_attr;
+	unsigned int i, flag = 0;
+
+	mem_val_attr =
+		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
+		sizeof(struct icp_qat_uof_initmem));
+
+	init_header = *init_tab_base;
+	if (!init_header) {
+		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+		if (!init_header)
+			return -ENOMEM;
+		init_header->size = 1;
+		*init_tab_base = init_header;
+		flag = 1;
+	}
+	tail_old = init_header;
+	while (tail_old->next)
+		tail_old = tail_old->next;
+	tail = tail_old;
+	for (i = 0; i < init_mem->val_attr_num; i++) {
+		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+		if (!mem_init)
+			goto out_err;
+		mem_init->ae = ae;
+		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+		mem_init->value = &mem_val_attr->value;
+		mem_init->size = 4;
+		mem_init->next = NULL;
+		tail->next = mem_init;
+		tail = mem_init;
+		init_header->size += qat_hal_get_ins_num();
+		mem_val_attr++;
+	}
+	return 0;
+out_err:
+	/* Do not free the list head unless we allocated it. */
+	tail_old = tail_old->next;
+	if (flag) {
+		kfree(*init_tab_base);
+		*init_tab_base = NULL;
+	}
+
+	while (tail_old) {
+		mem_init = tail_old->next;
+		kfree(tail_old);
+		tail_old = mem_init;
+	}
+	return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+				  struct icp_qat_uof_initmem *init_mem)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae;
+
+	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
+		return -EINVAL;
+	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+					    &obj_handle->lm_init_tab[ae]))
+		return -EINVAL;
+	return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+				  struct icp_qat_uof_initmem *init_mem)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae, ustore_size, uaddr, i;
+
+	ustore_size = obj_handle->ustore_phy_size;
+	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+		return -EINVAL;
+	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+					    &obj_handle->umem_init_tab[ae]))
+		return -EINVAL;
+	/* set the highest ustore address referenced */
+	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
+		if (obj_handle->ae_data[ae].ae_slices[i].
+		    encap_image->uwords_num < uaddr)
+			obj_handle->ae_data[ae].ae_slices[i].
+			encap_image->uwords_num = uaddr;
+	}
+	return 0;
+}
+
+#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+				   struct icp_qat_uof_initmem *init_mem)
+{
+	switch (init_mem->region) {
+	case ICP_QAT_UOF_LMEM_REGION:
+		if (qat_uclo_init_lmem_seg(handle, init_mem))
+			return -EINVAL;
+		break;
+	case ICP_QAT_UOF_UMEM_REGION:
+		if (qat_uclo_init_umem_seg(handle, init_mem))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("QAT: initmem region error. region type=0x%x\n",
+		       init_mem->region);
+		return -EINVAL;
+	}
+	return 0;
+}
+
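+/*
+ * Fill the parts of the control store that the image does not cover
+ * with the image's fill pattern: the words below the page start and
+ * the words from the end of the page up to the effective ustore size.
+ */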
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+				struct icp_qat_uclo_encapme *image)
+{
+	unsigned int i;
+	struct icp_qat_uclo_encap_page *page;
+	struct icp_qat_uof_image *uof_image;
+	unsigned char ae;
+	unsigned int ustore_size;
+	unsigned int patt_pos;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	uint64_t *fill_data;
+
+	uof_image = image->img_ptr;
+	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+			    GFP_KERNEL);
+	if (!fill_data)
+		return -ENOMEM;
+	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+		memcpy(&fill_data[i], &uof_image->fill_pattern,
+		       sizeof(uint64_t));
+	page = image->page;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+			continue;
+		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+		patt_pos = page->beg_addr_p + page->micro_words_num;
+
+		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+				  page->beg_addr_p, &fill_data[0]);
+		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+				  ustore_size - patt_pos + 1,
+				  &fill_data[page->beg_addr_p]);
+	}
+	kfree(fill_data);
+	return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+	int i, ae;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+
+	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+		if (initmem->num_in_bytes) {
+			if (qat_uclo_init_ae_memory(handle, initmem))
+				return -EINVAL;
+		}
+		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
+			(uintptr_t)initmem +
+			sizeof(struct icp_qat_uof_initmem)) +
+			(sizeof(struct icp_qat_uof_memvar_attr) *
+			initmem->val_attr_num));
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (qat_hal_batch_wr_lm(handle, ae,
+					obj_handle->lm_init_tab[ae])) {
+			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+			return -EINVAL;
+		}
+		qat_uclo_cleanup_batch_init_list(handle,
+						 &obj_handle->lm_init_tab[ae]);
+		qat_uclo_batch_wr_umem(handle, ae,
+				       obj_handle->umem_init_tab[ae]);
+		qat_uclo_cleanup_batch_init_list(handle,
+						 &obj_handle->
+						 umem_init_tab[ae]);
+	}
+	return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+				 char *chunk_id, void *cur)
+{
+	int i;
+	struct icp_qat_uof_chunkhdr *chunk_hdr =
+	    (struct icp_qat_uof_chunkhdr *)
+	    ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+	for (i = 0; i < obj_hdr->num_chunks; i++) {
+		if ((cur < (void *)&chunk_hdr[i]) &&
+		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
+			return &chunk_hdr[i];
+		}
+	}
+	return NULL;
+}
+
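+/* One step of the CRC-16/CCITT (polynomial 0x1021) chunk checksum. */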
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+	int i;
+	unsigned int topbit = 1 << 0xF;
+	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+	reg ^= inbyte << 0x8;
+	for (i = 0; i < 0x8; i++) {
+		if (reg & topbit)
+			reg = (reg << 1) ^ 0x1021;
+		else
+			reg <<= 1;
+	}
+	return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+	unsigned int chksum = 0;
+
+	if (ptr)
+		while (num--)
+			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+	return chksum;
+}
+
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+		   char *chunk_id)
+{
+	struct icp_qat_uof_filechunkhdr *file_chunk;
+	struct icp_qat_uclo_objhdr *obj_hdr;
+	char *chunk;
+	int i;
+
+	file_chunk = (struct icp_qat_uof_filechunkhdr *)
+		(buf + sizeof(struct icp_qat_uof_filehdr));
+	for (i = 0; i < file_hdr->num_chunks; i++) {
+		if (!strncmp(file_chunk->chunk_id, chunk_id,
+			     ICP_QAT_UOF_OBJID_LEN)) {
+			chunk = buf + file_chunk->offset;
+			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+				chunk, file_chunk->size))
+				break;
+			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+			if (!obj_hdr)
+				break;
+			obj_hdr->file_buff = chunk;
+			obj_hdr->checksum = file_chunk->checksum;
+			obj_hdr->size = file_chunk->size;
+			return obj_hdr;
+		}
+		file_chunk++;
+	}
+	return NULL;
+}
+
+static unsigned int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+			    struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+	struct icp_qat_uof_objtable *neigh_reg_tab;
+	struct icp_qat_uof_code_page *code_page;
+
+	code_page = (struct icp_qat_uof_code_page *)
+			((char *)image + sizeof(struct icp_qat_uof_image));
+	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+		     code_page->uc_var_tab_offset);
+	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+		      code_page->imp_var_tab_offset);
+	imp_expr_tab = (struct icp_qat_uof_objtable *)
+		       (encap_uof_obj->beg_uof +
+		       code_page->imp_expr_tab_offset);
+	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+	    imp_expr_tab->entry_num) {
+		pr_err("QAT: UOF can't contain imported variable to be parsed");
+		return -EINVAL;
+	}
+	neigh_reg_tab = (struct icp_qat_uof_objtable *)
+			(encap_uof_obj->beg_uof +
+			code_page->neigh_reg_tab_offset);
+	if (neigh_reg_tab->entry_num) {
+		pr_err("QAT: UOF can't contain shared control store feature");
+		return -EINVAL;
+	}
+	if (image->numpages > 1) {
+		pr_err("QAT: UOF can't contain multiple pages");
+		return -EINVAL;
+	}
+	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+		pr_err("QAT: UOF can't use shared control store feature");
+		return -EFAULT;
+	}
+	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+		pr_err("QAT: UOF can't use reloadable feature");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+				     *encap_uof_obj,
+				     struct icp_qat_uof_image *img,
+				     struct icp_qat_uclo_encap_page *page)
+{
+	struct icp_qat_uof_code_page *code_page;
+	struct icp_qat_uof_code_area *code_area;
+	struct icp_qat_uof_objtable *uword_block_tab;
+	struct icp_qat_uof_uword_block *uwblock;
+	int i;
+
+	code_page = (struct icp_qat_uof_code_page *)
+			((char *)img + sizeof(struct icp_qat_uof_image));
+	page->def_page = code_page->def_page;
+	page->page_region = code_page->page_region;
+	page->beg_addr_v = code_page->beg_addr_v;
+	page->beg_addr_p = code_page->beg_addr_p;
+	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+						code_page->code_area_offset);
+	page->micro_words_num = code_area->micro_words_num;
+	uword_block_tab = (struct icp_qat_uof_objtable *)
+			  (encap_uof_obj->beg_uof +
+			  code_area->uword_block_tab);
+	page->uwblock_num = uword_block_tab->entry_num;
+	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+			sizeof(struct icp_qat_uof_objtable));
+	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+	for (i = 0; i < uword_block_tab->entry_num; i++)
+		page->uwblock[i].micro_words =
+		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+			       struct icp_qat_uclo_encapme *ae_uimage,
+			       int max_image)
+{
+	int i, j;
+	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+	struct icp_qat_uof_image *image;
+	struct icp_qat_uof_objtable *ae_regtab;
+	struct icp_qat_uof_objtable *init_reg_sym_tab;
+	struct icp_qat_uof_objtable *sbreak_tab;
+	struct icp_qat_uof_encap_obj *encap_uof_obj =
+					&obj_handle->encap_uof_obj;
+
+	for (j = 0; j < max_image; j++) {
+		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+						ICP_QAT_UOF_IMAG, chunk_hdr);
+		if (!chunk_hdr)
+			break;
+		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+						     chunk_hdr->offset);
+		ae_regtab = (struct icp_qat_uof_objtable *)
+			   (image->reg_tab_offset +
+			   obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+			(((char *)ae_regtab) +
+			sizeof(struct icp_qat_uof_objtable));
+		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+				   (image->init_reg_sym_tab +
+				   obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+			(((char *)init_reg_sym_tab) +
+			sizeof(struct icp_qat_uof_objtable));
+		sbreak_tab = (struct icp_qat_uof_objtable *)
+			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+				      (((char *)sbreak_tab) +
+				      sizeof(struct icp_qat_uof_objtable));
+		ae_uimage[j].img_ptr = image;
+		if (qat_uclo_check_image_compat(encap_uof_obj, image))
+			goto out_err;
+		ae_uimage[j].page =
+			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+				GFP_KERNEL);
+		if (!ae_uimage[j].page)
+			goto out_err;
+		qat_uclo_map_image_page(encap_uof_obj, image,
+					ae_uimage[j].page);
+	}
+	return j;
+out_err:
+	for (i = 0; i < j; i++)
+		kfree(ae_uimage[i].page);
+	return 0;
+}
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+	int i, ae;
+	int mflag = 0;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+	for (ae = 0; ae < max_ae; ae++) {
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
+			continue;
+		for (i = 0; i < obj_handle->uimage_num; i++) {
+			if (!test_bit(ae, (unsigned long *)
+			&obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+				continue;
+			mflag = 1;
+			if (qat_uclo_init_ae_data(obj_handle, ae, i))
+				return -EINVAL;
+		}
+	}
+	if (!mflag) {
+		pr_err("QAT: uimage uses AE not set");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+		       char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+					obj_hdr->file_buff, tab_name, NULL);
+	if (chunk_hdr) {
+		int hdr_size;
+
+		memcpy(&str_table->table_len, obj_hdr->file_buff +
+		       chunk_hdr->offset, sizeof(str_table->table_len));
+		hdr_size = (char *)&str_table->strings - (char *)str_table;
+		str_table->strings = (uintptr_t)obj_hdr->file_buff +
+					chunk_hdr->offset + hdr_size;
+		return str_table;
+	}
+	return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+	struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+					ICP_QAT_UOF_IMEM, NULL);
+	if (chunk_hdr) {
+		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+			chunk_hdr->offset, sizeof(unsigned int));
+		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+		(encap_uof_obj->beg_uof + chunk_hdr->offset +
+		sizeof(unsigned int));
+	}
+}
+
+static unsigned int
+qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
+{
+	switch (handle->pci_dev->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		return ICP_QAT_AC_895XCC_DEV_TYPE;
+	case ADF_C62X_PCI_DEVICE_ID:
+		return ICP_QAT_AC_C62X_DEV_TYPE;
+	case ADF_C3XXX_PCI_DEVICE_ID:
+		return ICP_QAT_AC_C3XXX_DEV_TYPE;
+	default:
+		pr_err("QAT: unsupported device 0x%x\n",
+		       handle->pci_dev->device);
+		return 0;
+	}
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+	unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
+		pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
+		       prod_type);
+		return -EINVAL;
+	}
+	maj_ver = obj_handle->prod_rev & 0xff;
+	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
+	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
+		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+			     unsigned char ae, unsigned char ctx_mask,
+			     enum icp_qat_uof_regtype reg_type,
+			     unsigned short reg_addr, unsigned int value)
+{
+	switch (reg_type) {
+	case ICP_GPA_ABS:
+	case ICP_GPB_ABS:
+		ctx_mask = 0;
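+		/* fall through */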
+	case ICP_GPA_REL:
+	case ICP_GPB_REL:
+		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+					reg_addr, value);
+	case ICP_SR_ABS:
+	case ICP_DR_ABS:
+	case ICP_SR_RD_ABS:
+	case ICP_DR_RD_ABS:
+		ctx_mask = 0;
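+		/* fall through */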
+	case ICP_SR_REL:
+	case ICP_DR_REL:
+	case ICP_SR_RD_REL:
+	case ICP_DR_RD_REL:
+		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+					    reg_addr, value);
+	case ICP_SR_WR_ABS:
+	case ICP_DR_WR_ABS:
+		ctx_mask = 0;
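+		/* fall through */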
+	case ICP_SR_WR_REL:
+	case ICP_DR_WR_REL:
+		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+					    reg_addr, value);
+	case ICP_NEIGH_REL:
+		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+	default:
+		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+				 unsigned int ae,
+				 struct icp_qat_uclo_encapme *encap_ae)
+{
+	unsigned int i;
+	unsigned char ctx_mask;
+	struct icp_qat_uof_init_regsym *init_regsym;
+
+	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+	    ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+
+	for (i = 0; i < encap_ae->init_regsym_num; i++) {
+		unsigned int exp_res;
+
+		init_regsym = &encap_ae->init_regsym[i];
+		exp_res = init_regsym->value;
+		switch (init_regsym->init_type) {
+		case ICP_QAT_UOF_INIT_REG:
+			qat_uclo_init_reg(handle, ae, ctx_mask,
+					  (enum icp_qat_uof_regtype)
+					  init_regsym->reg_type,
+					  (unsigned short)init_regsym->reg_addr,
+					  exp_res);
+			break;
+		case ICP_QAT_UOF_INIT_REG_CTX:
+			/* check if ctx is appropriate for the ctx mode */
+			if (!((1 << init_regsym->ctx) & ctx_mask)) {
+				pr_err("QAT: invalid ctx num = 0x%x\n",
+				       init_regsym->ctx);
+				return -EINVAL;
+			}
+			qat_uclo_init_reg(handle, ae,
+					  (unsigned char)
+					  (1 << init_regsym->ctx),
+					  (enum icp_qat_uof_regtype)
+					  init_regsym->reg_type,
+					  (unsigned short)init_regsym->reg_addr,
+					  exp_res);
+			break;
+		case ICP_QAT_UOF_INIT_EXPR:
+			pr_err("QAT: INIT_EXPR feature not supported\n");
+			return -EINVAL;
+		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+			return -EINVAL;
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int s, ae;
+
+	if (obj_handle->global_inited)
+		return 0;
+	if (obj_handle->init_mem_tab.entry_num) {
+		if (qat_uclo_init_memory(handle)) {
+			pr_err("QAT: initialize memory failed\n");
+			return -EINVAL;
+		}
+	}
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+				continue;
+			if (qat_uclo_init_reg_sym(handle, ae,
+						  obj_handle->ae_data[ae].
+						  ae_slices[s].encap_image))
+				return -EINVAL;
+		}
+	}
+	obj_handle->global_inited = 1;
+	return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned char ae, nn_mode, s;
+	struct icp_qat_uof_image *uof_image;
+	struct icp_qat_uclo_aedata *ae_data;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae,
+			      (unsigned long *)&handle->hal_handle->ae_mask))
+			continue;
+		ae_data = &obj_handle->ae_data[ae];
+		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+				      ICP_QAT_UCLO_MAX_CTX); s++) {
+			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+				continue;
+			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+			if (qat_hal_set_ae_ctx_mode(handle, ae,
+						    (char)ICP_QAT_CTX_MODE
+						    (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+				return -EFAULT;
+			}
+			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
+				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+				return -EFAULT;
+			}
+			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
+						   (char)ICP_QAT_LOC_MEM0_MODE
+						   (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+				return -EFAULT;
+			}
+			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
+						   (char)ICP_QAT_LOC_MEM1_MODE
+						   (uof_image->ae_mode))) {
+				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+				return -EFAULT;
+			}
+		}
+	}
+	return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	struct icp_qat_uclo_encapme *image;
+	int a;
+
+	for (a = 0; a < obj_handle->uimage_num; a++) {
+		image = &obj_handle->ae_uimage[a];
+		image->uwords_num = image->page->beg_addr_p +
+					image->page->micro_words_num;
+	}
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ae;
+
+	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+					     obj_handle->obj_hdr->file_buff;
+	obj_handle->uword_in_bytes = 6;
+	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
+	obj_handle->prod_rev = PID_MAJOR_REV |
+			(PID_MINOR_REV & handle->hal_handle->revision_id);
+	if (qat_uclo_check_uof_compat(obj_handle)) {
+		pr_err("QAT: UOF incompatible\n");
+		return -EINVAL;
+	}
+	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+					GFP_KERNEL);
+	if (!obj_handle->uword_buf)
+		return -ENOMEM;
+	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+	if (!obj_handle->obj_hdr->file_buff ||
+	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+				    &obj_handle->str_table)) {
+		pr_err("QAT: UOF doesn't have effective images\n");
+		goto out_err;
+	}
+	obj_handle->uimage_num =
+		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+	if (!obj_handle->uimage_num)
+		goto out_err;
+	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+		pr_err("QAT: Bad object\n");
+		goto out_check_uof_aemask_err;
+	}
+	qat_uclo_init_uword_num(handle);
+	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+				   &obj_handle->init_mem_tab);
+	if (qat_uclo_set_ae_mode(handle))
+		goto out_check_uof_aemask_err;
+	return 0;
+out_check_uof_aemask_err:
+	for (ae = 0; ae < obj_handle->uimage_num; ae++)
+		kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+	kfree(obj_handle->uword_buf);
+	return -EFAULT;
+}
+
+static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+				      struct icp_qat_suof_filehdr *suof_ptr,
+				      int suof_size)
+{
+	unsigned int check_sum = 0;
+	unsigned int min_ver_offset = 0;
+	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+
+	suof_handle->file_id = ICP_QAT_SUOF_FID;
+	suof_handle->suof_buf = (char *)suof_ptr;
+	suof_handle->suof_size = suof_size;
+	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
+					      min_ver);
+	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
+					       min_ver_offset);
+	if (check_sum != suof_ptr->check_sum) {
+		pr_err("QAT: incorrect SUOF checksum\n");
+		return -EINVAL;
+	}
+	suof_handle->check_sum = suof_ptr->check_sum;
+	suof_handle->min_ver = suof_ptr->min_ver;
+	suof_handle->maj_ver = suof_ptr->maj_ver;
+	suof_handle->fw_type = suof_ptr->fw_type;
+	return 0;
+}
+
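+/*
+ * Slice one SUOF chunk into its CSS components (header, FWSK public
+ * key, signature, signed image) and pull the AE mask and metadata out
+ * of the embedded icp_qat_simg_ae_mode block.
+ */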
+static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
+			      struct icp_qat_suof_img_hdr *suof_img_hdr,
+			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+	struct icp_qat_simg_ae_mode *ae_mode;
+	struct icp_qat_suof_objhdr *suof_objhdr;
+
+	suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
+				   suof_chunk_hdr->offset +
+				   sizeof(*suof_objhdr));
+	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
+				  (suof_handle->suof_buf +
+				   suof_chunk_hdr->offset))->img_length;
+
+	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
+	suof_img_hdr->css_key = (suof_img_hdr->css_header +
+				 sizeof(struct icp_qat_css_hdr));
+	suof_img_hdr->css_signature = suof_img_hdr->css_key +
+				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
+				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
+	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
+				 ICP_QAT_CSS_SIGNATURE_LEN;
+
+	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
+	suof_img_hdr->ae_mask = ae_mode->ae_mask;
+	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
+	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
+	suof_img_hdr->fw_type = ae_mode->fw_type;
+}
+
+static void
+qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
+			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+	char **sym_str = (char **)&suof_handle->sym_str;
+	unsigned int *sym_size = &suof_handle->sym_size;
+	struct icp_qat_suof_strtable *str_table_obj;
+
+	*sym_size = *(unsigned int *)(uintptr_t)
+		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
+	*sym_str = (char *)(uintptr_t)
+		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
+		   sizeof(str_table_obj->tab_length));
+}
+
+static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
+				      struct icp_qat_suof_img_hdr *img_hdr)
+{
+	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
+	unsigned int prod_rev, maj_ver, prod_type;
+
+	prod_type = qat_uclo_get_dev_type(handle);
+	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
+	prod_rev = PID_MAJOR_REV |
+			 (PID_MINOR_REV & handle->hal_handle->revision_id);
+	if (img_ae_mode->dev_type != prod_type) {
+		pr_err("QAT: incompatible product type %x\n",
+		       img_ae_mode->dev_type);
+		return -EINVAL;
+	}
+	maj_ver = prod_rev & 0xff;
+	if ((maj_ver > img_ae_mode->devmax_ver) ||
+	    (maj_ver < img_ae_mode->devmin_ver)) {
+		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+
+	kfree(sobj_handle->img_table.simg_hdr);
+	sobj_handle->img_table.simg_hdr = NULL;
+	kfree(handle->sobj_handle);
+	handle->sobj_handle = NULL;
+}
+
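+/*
+ * Move the image that maps AE 0 to the end of the image table,
+ * presumably so that it is loaded last.
+ */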
+static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
+			      unsigned int img_id, unsigned int num_simgs)
+{
+	struct icp_qat_suof_img_hdr img_header;
+
+	if (img_id != num_simgs - 1) {
+		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
+		       sizeof(*suof_img_hdr));
+		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
+		       sizeof(*suof_img_hdr));
+		memcpy(&suof_img_hdr[img_id], &img_header,
+		       sizeof(*suof_img_hdr));
+	}
+}
+
+static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
+			     struct icp_qat_suof_filehdr *suof_ptr,
+			     int suof_size)
+{
+	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
+	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
+	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
+	unsigned int i = 0;
+	struct icp_qat_suof_img_hdr img_header;
+
+	if (!suof_ptr || (suof_size == 0)) {
+		pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+		return -EINVAL;
+	}
+	if (qat_uclo_check_suof_format(suof_ptr))
+		return -EINVAL;
+	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
+	if (ret)
+		return ret;
+	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
+			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
+
+	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
+	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
+
+	if (suof_handle->img_table.num_simgs != 0) {
+		suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
+				       sizeof(img_header), GFP_KERNEL);
+		if (!suof_img_hdr)
+			return -ENOMEM;
+		suof_handle->img_table.simg_hdr = suof_img_hdr;
+	}
+
+	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
+		qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
+				  &suof_chunk_hdr[1 + i]);
+		ret = qat_uclo_check_simg_compat(handle,
+						 &suof_img_hdr[i]);
+		if (ret)
+			return ret;
+		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
+			ae0_img = i;
+	}
+	qat_uclo_tail_img(suof_img_hdr, ae0_img,
+			  suof_handle->img_table.num_simgs);
+	return 0;
+}
+
+#define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
+#define BITS_IN_DWORD 32
+
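+/*
+ * Ask the FCU to authenticate the CSS-signed image: program the
+ * descriptor's bus address, issue the AUTH command and poll FCU_STATUS
+ * until verification succeeds, fails, or the retry budget runs out.
+ */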
+static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
+			    struct icp_qat_fw_auth_desc *desc)
+{
+	unsigned int fcu_sts, retry = 0;
+	u64 bus_addr;
+
+	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
+			   - sizeof(struct icp_qat_auth_chunk);
+	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
+	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
+	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);
+
+	do {
+		msleep(FW_AUTH_WAIT_PERIOD);
+		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
+			goto auth_fail;
+		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
+			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
+				return 0;
+	} while (retry++ < FW_AUTH_MAX_RETRY);
+auth_fail:
+	pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+	       fcu_sts & FCU_AUTH_STS_MASK, retry);
+	return -EINVAL;
+}
+
+static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
+			       struct icp_firml_dram_desc *dram_desc,
+			       unsigned int size)
+{
+	void *vptr;
+	dma_addr_t ptr;
+
+	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
+				  size, &ptr, GFP_KERNEL);
+	if (!vptr)
+		return -ENOMEM;
+	dram_desc->dram_base_addr_v = vptr;
+	dram_desc->dram_bus_addr = ptr;
+	dram_desc->dram_size = size;
+	return 0;
+}
+
+static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
+			       struct icp_firml_dram_desc *dram_desc)
+{
+	dma_free_coherent(&handle->pci_dev->dev,
+			  (size_t)(dram_desc->dram_size),
+			  (dram_desc->dram_base_addr_v),
+			  dram_desc->dram_bus_addr);
+	memset(dram_desc, 0, sizeof(*dram_desc));
+}
+
+static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
+				   struct icp_qat_fw_auth_desc **desc)
+{
+	struct icp_firml_dram_desc dram_desc;
+
+	dram_desc.dram_base_addr_v = *desc;
+	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
+				   (*desc))->chunk_bus_addr;
+	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
+			       (*desc))->chunk_size;
+	qat_uclo_simg_free(handle, &dram_desc);
+}
+
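+/*
+ * Build the DMA-able authentication descriptor: copy the CSS header,
+ * FWSK public key (modulus, pad, exponent), signature and image body
+ * into one coherent buffer and record the bus address of each piece
+ * in the descriptor handed to the FCU.
+ */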
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+				char *image, unsigned int size,
+				struct icp_qat_fw_auth_desc **desc)
+{
+	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+	struct icp_qat_fw_auth_desc *auth_desc;
+	struct icp_qat_auth_chunk *auth_chunk;
+	u64 virt_addr,  bus_addr, virt_base;
+	unsigned int length, simg_offset = sizeof(*auth_chunk);
+	struct icp_firml_dram_desc img_desc;
+
+	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+		pr_err("QAT: error, input image size overflow %d\n", size);
+		return -EINVAL;
+	}
+	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
+		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
+		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
+	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+		pr_err("QAT: error, allocate continuous dram fail\n");
+		return -ENOMEM;
+	}
+
+	auth_chunk = img_desc.dram_base_addr_v;
+	auth_chunk->chunk_size = img_desc.dram_size;
+	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
+	bus_addr  = img_desc.dram_bus_addr + simg_offset;
+	auth_desc = img_desc.dram_base_addr_v;
+	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->css_hdr_low = (unsigned int)bus_addr;
+	virt_addr = virt_base;
+
+	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+	/* pub key */
+	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
+			   sizeof(*css_hdr);
+	virt_addr = virt_addr + sizeof(*css_hdr);
+
+	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + sizeof(*css_hdr)),
+	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
+	/* padding */
+	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
+	       0, ICP_QAT_CSS_FWSK_PAD_LEN);
+
+	/* exponent */
+	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
+	       ICP_QAT_CSS_FWSK_PAD_LEN),
+	       (void *)(image + sizeof(*css_hdr) +
+			ICP_QAT_CSS_FWSK_MODULUS_LEN),
+	       sizeof(unsigned int));
+
+	/* signature */
+	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
+			    auth_desc->fwsk_pub_low) +
+		   ICP_QAT_CSS_FWSK_PUB_LEN;
+	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
+	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->signature_low = (unsigned int)bus_addr;
+
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + sizeof(*css_hdr) +
+	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
+	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
+	       ICP_QAT_CSS_SIGNATURE_LEN);
+
+	bus_addr = ADD_ADDR(auth_desc->signature_high,
+			    auth_desc->signature_low) +
+		   ICP_QAT_CSS_SIGNATURE_LEN;
+	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
+
+	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+	auth_desc->img_low = (unsigned int)bus_addr;
+	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
+	memcpy((void *)(uintptr_t)virt_addr,
+	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
+	       auth_desc->img_len);
+	virt_addr = virt_base;
+	/* AE firmware */
+	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
+	    CSS_AE_FIRMWARE) {
+		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
+		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
+		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
+				    auth_desc->img_ae_mode_data_low) +
+			   sizeof(struct icp_qat_simg_ae_mode);
+
+		auth_desc->img_ae_init_data_high = (unsigned int)
+						 (bus_addr >> BITS_IN_DWORD);
+		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+		auth_desc->img_ae_insts_high = (unsigned int)
+					     (bus_addr >> BITS_IN_DWORD);
+		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+	} else {
+		auth_desc->img_ae_insts_high = auth_desc->img_high;
+		auth_desc->img_ae_insts_low = auth_desc->img_low;
+	}
+	*desc = auth_desc;
+	return 0;
+}
+
+static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
+			    struct icp_qat_fw_auth_desc *desc)
+{
+	unsigned int i;
+	unsigned int fcu_sts;
+	struct icp_qat_simg_ae_mode *virt_addr;
+	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
+
+	virt_addr = (void *)((uintptr_t)desc +
+		     sizeof(struct icp_qat_auth_chunk) +
+		     sizeof(struct icp_qat_css_hdr) +
+		     ICP_QAT_CSS_FWSK_PUB_LEN +
+		     ICP_QAT_CSS_SIGNATURE_LEN);
+	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
+		int retry = 0;
+
+		if (!((virt_addr->ae_mask >> i) & 0x1))
+			continue;
+		if (qat_hal_check_ae_active(handle, i)) {
+			pr_err("QAT: AE %d is active\n", i);
+			return -EINVAL;
+		}
+		SET_CAP_CSR(handle, FCU_CONTROL,
+			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
+
+		do {
+			msleep(FW_AUTH_WAIT_PERIOD);
+			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
+			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
+			    FCU_STS_LOAD_DONE) &&
+			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
+				break;
+		} while (retry++ < FW_AUTH_MAX_RETRY);
+		if (retry > FW_AUTH_MAX_RETRY) {
+			pr_err("QAT: firmware load failed timeout %x\n", retry);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
+				 void *addr_ptr, int mem_size)
+{
+	struct icp_qat_suof_handle *suof_handle;
+
+	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
+	if (!suof_handle)
+		return -ENOMEM;
+	handle->sobj_handle = suof_handle;
+	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
+		qat_uclo_del_suof(handle);
+		pr_err("QAT: map SUOF failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
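+/*
+ * Write the MMP image: parts with firmware authentication go through the
+ * map/authenticate flow, all others get the image copied into SRAM directly.
+ */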
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+		       void *addr_ptr, int mem_size)
+{
+	struct icp_qat_fw_auth_desc *desc = NULL;
+	int status = 0;
+
+	if (handle->fw_auth) {
+		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
+			status = qat_uclo_auth_fw(handle, desc);
+		qat_uclo_ummap_auth_fw(handle, &desc);
+	} else {
+		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
+			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
+			return -EINVAL;
+		}
+		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
+	}
+	return status;
+}
+
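+/* Duplicate a UOF object into a new object handle and parse its chunks */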
+static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+				void *addr_ptr, int mem_size)
+{
+	struct icp_qat_uof_filehdr *filehdr;
+	struct icp_qat_uclo_objhandle *objhdl;
+
+	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+	if (!objhdl)
+		return -ENOMEM;
+	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+	if (!objhdl->obj_buf)
+		goto out_objbuf_err;
+	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+	if (qat_uclo_check_uof_format(filehdr))
+		goto out_objhdr_err;
+	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+					     ICP_QAT_UOF_OBJS);
+	if (!objhdl->obj_hdr) {
+		pr_err("QAT: object file chunk is null\n");
+		goto out_objhdr_err;
+	}
+	handle->obj_handle = objhdl;
+	if (qat_uclo_parse_uof_obj(handle))
+		goto out_overlay_obj_err;
+	return 0;
+
+out_overlay_obj_err:
+	handle->obj_handle = NULL;
+	kfree(objhdl->obj_hdr);
+out_objhdr_err:
+	kfree(objhdl->obj_buf);
+out_objbuf_err:
+	kfree(objhdl);
+	return -ENOMEM;
+}
+
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+		     void *addr_ptr, int mem_size)
+{
+	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+		     (sizeof(handle->hal_handle->ae_mask) * 8));
+
+	if (!handle || !addr_ptr || mem_size < 24)
+		return -EINVAL;
+
+	return (handle->fw_auth) ?
+			qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
+			qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
+}
+
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int a;
+
+	if (handle->sobj_handle)
+		qat_uclo_del_suof(handle);
+	if (!obj_handle)
+		return;
+
+	kfree(obj_handle->uword_buf);
+	for (a = 0; a < obj_handle->uimage_num; a++)
+		kfree(obj_handle->ae_uimage[a].page);
+
+	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+	kfree(obj_handle->obj_hdr);
+	kfree(obj_handle->obj_buf);
+	kfree(obj_handle);
+	handle->obj_handle = NULL;
+}
+
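+/*
+ * Fetch the microword at relative address @raddr from the page's uword
+ * blocks; invalid words (and a missing page) resolve to the fill pattern.
+ */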
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+				 struct icp_qat_uclo_encap_page *encap_page,
+				 uint64_t *uword, unsigned int addr_p,
+				 unsigned int raddr, uint64_t fill)
+{
+	uint64_t uwrd = 0;
+	unsigned int i;
+
+	if (!encap_page) {
+		*uword = fill;
+		return;
+	}
+	for (i = 0; i < encap_page->uwblock_num; i++) {
+		if (raddr >= encap_page->uwblock[i].start_addr &&
+		    raddr <= encap_page->uwblock[i].start_addr +
+		    encap_page->uwblock[i].words_num - 1) {
+			raddr -= encap_page->uwblock[i].start_addr;
+			raddr *= obj_handle->uword_in_bytes;
+			memcpy(&uwrd, (void *)(((uintptr_t)
+			       encap_page->uwblock[i].micro_words) + raddr),
+			       obj_handle->uword_in_bytes);
+			uwrd = uwrd & 0xbffffffffffull;
+		}
+	}
+	*uword = uwrd;
+	if (*uword == INVLD_UWORD)
+		*uword = fill;
+}
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+					struct icp_qat_uclo_encap_page
+					*encap_page, unsigned int ae)
+{
+	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	uint64_t fill_pat;
+
+	/* load the page starting at appropriate ustore address */
+	/* get fill-pattern from an image -- they are all the same */
+	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+	       sizeof(uint64_t));
+	uw_physical_addr = encap_page->beg_addr_p;
+	uw_relative_addr = 0;
+	words_num = encap_page->micro_words_num;
+	while (words_num) {
+		if (words_num < UWORD_CPYBUF_SIZE)
+			cpylen = words_num;
+		else
+			cpylen = UWORD_CPYBUF_SIZE;
+
+		/* load the buffer */
+		for (i = 0; i < cpylen; i++)
+			qat_uclo_fill_uwords(obj_handle, encap_page,
+					     &obj_handle->uword_buf[i],
+					     uw_physical_addr + i,
+					     uw_relative_addr + i, fill_pat);
+
+		/* copy the buffer to ustore */
+		qat_hal_wr_uwords(handle, (unsigned char)ae,
+				  uw_physical_addr, cpylen,
+				  obj_handle->uword_buf);
+
+		uw_physical_addr += cpylen;
+		uw_relative_addr += cpylen;
+		words_num -= cpylen;
+	}
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+				    struct icp_qat_uof_image *image)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int ctx_mask, s;
+	struct icp_qat_uclo_page *page;
+	unsigned char ae;
+	int ctx;
+
+	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+		ctx_mask = 0xff;
+	else
+		ctx_mask = 0x55;
+	/*
+	 * Load the default page and set the assigned CTX PC
+	 * to the entrypoint address.
+	 */
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
+			continue;
+		/* find the slice to which this image is assigned */
+		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+			if (image->ctx_assigned & obj_handle->ae_data[ae].
+			    ae_slices[s].ctx_mask_assigned)
+				break;
+		}
+		if (s >= obj_handle->ae_data[ae].slice_num)
+			continue;
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		if (!page->encap_page->def_page)
+			continue;
+		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+		page = obj_handle->ae_data[ae].ae_slices[s].page;
+		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+					(ctx_mask & (1 << ctx)) ? page : NULL;
+		qat_hal_set_live_ctx(handle, (unsigned char)ae,
+				     image->ctx_assigned);
+		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+			       image->entry_address);
+	}
+}
+
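+/* Map, authenticate and load every sub-image in the SUOF image table */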
+static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
+{
+	unsigned int i;
+	struct icp_qat_fw_auth_desc *desc = NULL;
+	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+
+	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+		if (qat_uclo_map_auth_fw(handle,
+					 (char *)simg_hdr[i].simg_buf,
+					 (unsigned int)
+					 (simg_hdr[i].simg_len),
+					 &desc))
+			goto wr_err;
+		if (qat_uclo_auth_fw(handle, desc))
+			goto wr_err;
+		if (qat_uclo_load_fw(handle, desc))
+			goto wr_err;
+		qat_uclo_ummap_auth_fw(handle, &desc);
+	}
+	return 0;
+wr_err:
+	qat_uclo_ummap_auth_fw(handle, &desc);
+	return -EINVAL;
+}
+
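+/* Initialize globals and ustore, then write each AE image's pages */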
+static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
+{
+	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+	unsigned int i;
+
+	if (qat_uclo_init_globals(handle))
+		return -EINVAL;
+	for (i = 0; i < obj_handle->uimage_num; i++) {
+		if (!obj_handle->ae_uimage[i].img_ptr)
+			return -EINVAL;
+		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+			return -EINVAL;
+		qat_uclo_wr_uimage_page(handle,
+					obj_handle->ae_uimage[i].img_ptr);
+	}
+	return 0;
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
+				   qat_uclo_wr_uof_img(handle);
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/Makefile
new file mode 100644
index 0000000..180a00e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644
index 0000000..1dfcab3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -0,0 +1,265 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xcc_hw_data.h"
+
+/* Worker thread to service arbiter mappings based on dev SKUs */
+static const uint32_t thrd_to_arb_map_sku4[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const uint32_t thrd_to_arb_map_sku6[] = {
+	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+	.name = ADF_DH895XCC_DEVICE_NAME,
+	.type = DEV_DH895XCC,
+	.instances = 0
+};
+
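+/* Fuse bits are active-low disables: invert to get the enabled units */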
+static uint32_t get_accel_mask(uint32_t fuse)
+{
+	return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+			  ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static uint32_t get_ae_mask(uint32_t fuse)
+{
+	return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static uint32_t get_num_accels(struct adf_hw_device_data *self)
+{
+	uint32_t i, ctr = 0;
+
+	if (!self || !self->accel_mask)
+		return 0;
+
+	for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
+		if (self->accel_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static uint32_t get_num_aes(struct adf_hw_device_data *self)
+{
+	uint32_t i, ctr = 0;
+
+	if (!self || !self->ae_mask)
+		return 0;
+
+	for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
+		if (self->ae_mask & (1 << i))
+			ctr++;
+	}
+	return ctr;
+}
+
+static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_PMISC_BAR;
+}
+
+static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_ETR_BAR;
+}
+
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+	switch (sku) {
+	case ADF_DH895XCC_FUSECTL_SKU_1:
+		return DEV_SKU_1;
+	case ADF_DH895XCC_FUSECTL_SKU_2:
+		return DEV_SKU_2;
+	case ADF_DH895XCC_FUSECTL_SKU_3:
+		return DEV_SKU_3;
+	case ADF_DH895XCC_FUSECTL_SKU_4:
+		return DEV_SKU_4;
+	default:
+		return DEV_SKU_UNKNOWN;
+	}
+	return DEV_SKU_UNKNOWN;
+}
+
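+/* Select the thread-to-arbiter mapping table that matches the device SKU */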
+static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+				    u32 const **arb_map_config)
+{
+	switch (accel_dev->accel_pci_dev.sku) {
+	case DEV_SKU_1:
+		*arb_map_config = thrd_to_arb_map_sku4;
+		break;
+
+	case DEV_SKU_2:
+	case DEV_SKU_4:
+		*arb_map_config = thrd_to_arb_map_sku6;
+		break;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"The configuration doesn't match any SKU\n");
+		*arb_map_config = NULL;
+	}
+}
+
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+	return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+	return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+	void __iomem *csr = misc_bar->virt_addr;
+	unsigned int val, i;
+
+	/* Enable Accel Engine error detection & correction */
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
+		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
+		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
+		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
+		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
+	}
+
+	/* Enable shared memory error detection & correction */
+	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
+		val |= ADF_DH895XCC_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
+		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
+		val |= ADF_DH895XCC_ERRSSMSH_EN;
+		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
+	}
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+	void __iomem *addr;
+
+	addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+
+	/* Enable bundle and misc interrupts */
+	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+		   accel_dev->pf.vf_info ? 0 :
+			GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
+	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+		   ADF_DH895XCC_SMIA1_MASK);
+}
+
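+/* VF-to-PF comms need no PF-side enable on this device */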
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &dh895xcc_class;
+	hw_data->instance_id = dh895xcc_class.instances++;
+	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_isr_resource_alloc;
+	hw_data->free_irq = adf_isr_resource_free;
+	hw_data->enable_error_correction = adf_enable_error_correction;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
+	hw_data->get_sku = get_sku;
+	hw_data->fw_name = ADF_DH895XCC_FW;
+	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
+	hw_data->init_admin_comms = adf_init_admin_comms;
+	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
+	hw_data->init_arb = adf_init_arb;
+	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->reset_device = adf_reset_sbr;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644
index 0000000..092f735
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -0,0 +1,89 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_DH895XCC_RX_RINGS_OFFSET 8
+#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
+#define ADF_DH895XCC_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
+#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
+#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
+#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
+
+#define ADF_DH895XCC_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
new file mode 100644
index 0000000..07b741a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -0,0 +1,341 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xcc_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_DH895XCC_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_DH895XCC_PCI_DEVICE_ID:
+			adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_DH895XCC_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/*
+		 * If the accelerator is connected to a node with no memory,
+		 * there is no point in using the accelerator, since remote
+		 * memory transactions will be very slow.
+		 */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/*
+	 * Add the accel device to the accel table; this must happen
+	 * before adf_cleanup_accel() is called.
+	 */
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+	pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+			      &hw_data->fuses);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+	/* If the device has no acceleration engines then ignore it. */
+	if (!hw_data->accel_mask || !hw_data->ae_mask ||
+	    ((~hw_data->ae_mask) & 0x01)) {
+		dev_err(&pdev->dev, "No acceleration units found\n");
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
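+	/* Use a 1 KB PCIe maximum read request size */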
+	pcie_set_readrq(pdev, 1024);
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Read accelerator capabilities mask */
+	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
+			      &hw_data->accel_capabilities_mask);
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+
+	if (adf_enable_aer(accel_dev, &adf_driver)) {
+		dev_err(&pdev->dev, "Failed to enable aer\n");
+		ret = -EFAULT;
+		goto out_err_free_reg;
+	}
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev, "Failed to save pci state\n");
+		ret = -ENOMEM;
+		goto out_err_free_reg;
+	}
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_disable_aer(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_DH895XCC_FW);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/Makefile b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 0000000..5c3ccf8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,3 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 0000000..a3b4dd8
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,151 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+	.name = ADF_DH895XCCVF_DEVICE_NAME,
+	.type = DEV_DH895XCCVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
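+/* No-op stubs for operations that the PF handles on the VF's behalf */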
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &dh895xcciov_class;
+	hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+	hw_data->dev_class->instances++;
+	adf_devmgr_update_class_index(hw_data);
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+	adf_devmgr_update_class_index(hw_data);
+}
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 0000000..6ddc19b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,64 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+#define ADF_DH895XCCIOV_PF2VF_OFFSET	0x200
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET	0x208
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 0000000..3da0f95
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,306 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = ADF_DH895XCCVF_DEVICE_NAME,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	unsigned long bar_mask;
+	int ret;
+
+	switch (ent->device) {
+	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = qat_crypto_dev_config(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
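+	/* Record that the PF backing this VF is up and running */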
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);