ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/pci/controller/dwc/Kconfig b/marvell/linux/drivers/pci/controller/dwc/Kconfig
new file mode 100644
index 0000000..ab4025e
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/Kconfig
@@ -0,0 +1,280 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "DesignWare PCI Core Support"
+	depends on PCI
+
+config PCIE_DW
+	bool
+
+config PCIE_DW_HOST
+	bool
+	select PCIE_DW
+
+config PCIE_DW_EP
+	bool
+	depends on PCI_ENDPOINT
+	select PCIE_DW
+
+config PCI_DRA7XX
+	bool
+
+config PCI_DRA7XX_HOST
+	bool "TI DRA7xx PCIe controller Host Mode"
+	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	depends on OF && HAS_IOMEM && TI_PIPE3
+	select PCIE_DW_HOST
+	select PCI_DRA7XX
+	default y
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  host mode. There are two instances of the PCIe controller in
+	  DRA7xx. This controller can work either as EP or RC. In order to
+	  enable host-specific features, PCI_DRA7XX_HOST must be selected; in
+	  order to enable device-specific features, PCI_DRA7XX_EP must be
+	  selected.
+	  This uses the DesignWare core.
+
+config PCI_DRA7XX_EP
+	bool "TI DRA7xx PCIe controller Endpoint Mode"
+	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on PCI_ENDPOINT
+	depends on OF && HAS_IOMEM && TI_PIPE3
+	select PCIE_DW_EP
+	select PCI_DRA7XX
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  endpoint mode. There are two instances of the PCIe controller in
+	  DRA7xx. This controller can work either as EP or RC. In order to
+	  enable host-specific features, PCI_DRA7XX_HOST must be selected; in
+	  order to enable device-specific features, PCI_DRA7XX_EP must be
+	  selected.
+	  This uses the DesignWare core.
+
+config PCIE_DW_PLAT
+	bool
+
+config PCIE_DW_PLAT_HOST
+	bool "Platform bus based DesignWare PCIe Controller - Host mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in host mode. The DesignWare IP can be configured as either
+	  an EP or an RC. In order to enable host-specific features,
+	  PCIE_DW_PLAT_HOST must be selected; in order to enable
+	  device-specific features, PCIE_DW_PLAT_EP must be selected.
+
+config PCIE_DW_PLAT_EP
+	bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in endpoint mode. The DesignWare IP can be configured as
+	  either an EP or an RC. In order to enable host-specific features,
+	  PCIE_DW_PLAT_HOST must be selected; in order to enable
+	  device-specific features, PCIE_DW_PLAT_EP must be selected.
+
+config PCI_EXYNOS
+	bool "Samsung Exynos PCIe controller"
+	depends on SOC_EXYNOS5440 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+
+config PCI_IMX6
+	bool "Freescale i.MX6/7/8 PCIe controller"
+	depends on ARCH_MXC || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+
+config PCIE_SPEAR13XX
+	bool "STMicroelectronics SPEAr PCIe controller"
+	depends on ARCH_SPEAR13XX || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
+config PCI_KEYSTONE
+	bool
+
+config PCI_KEYSTONE_HOST
+	bool "PCI Keystone Host Mode"
+	depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCI_KEYSTONE
+	default y
+	help
+	  Enables support for the PCIe controller in the Keystone SoC to
+	  work in host mode. The PCI controller on Keystone is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+	bool "PCI Keystone Endpoint Mode"
+	depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCI_KEYSTONE
+	help
+	  Enables support for the PCIe controller in the Keystone SoC to
+	  work in endpoint mode. The PCI controller on Keystone is based
+	  on DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCI_LAYERSCAPE
+	bool "Freescale Layerscape PCIe controller - Host mode"
+	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select MFD_SYSCON
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on Layerscape
+	  SoCs to work in Host mode.
+	  This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+	  determines which PCIe controller works in EP mode and which PCIe
+	  controller works in RC mode.
+
+config PCI_LAYERSCAPE_EP
+	bool "Freescale Layerscape PCIe controller - Endpoint mode"
+	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	help
+	  Say Y here if you want to enable PCIe controller support on Layerscape
+	  SoCs to work in Endpoint mode.
+	  This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+	  determines which PCIe controller works in EP mode and which PCIe
+	  controller works in RC mode.
+
+config PCI_HISI
+	depends on OF && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCI_HOST_COMMON
+	help
+	  Say Y here if you want PCIe controller support on HiSilicon
+	  Hip05 and Hip06 SoCs.
+
+config PCIE_QCOM
+	bool "Qualcomm PCIe controller"
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+	  PCIe controller uses the DesignWare core plus Qualcomm-specific
+	  hardware wrappers.
+
+config PCIE_ARMADA_8K
+	bool "Marvell Armada-8K PCIe controller"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCIE_ARTPEC6
+	bool
+
+config PCIE_ARTPEC6_HOST
+	bool "Axis ARTPEC-6 PCIe controller Host Mode"
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_ARTPEC6
+	help
+	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+	  host mode. This uses the DesignWare core.
+
+config PCIE_ARTPEC6_EP
+	bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_ARTPEC6
+	help
+	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+	  endpoint mode. This uses the DesignWare core.
+
+config PCIE_KIRIN
+	depends on OF && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon Kirin series SoCs PCIe controllers"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support
+	  on HiSilicon Kirin series SoCs.
+
+config PCIE_HISI_STB
+	bool "HiSilicon STB SoCs PCIe controllers"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs.
+
+config PCI_MESON
+	bool "MESON PCIe controller"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCI controller support on Amlogic
+	  SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+	  and therefore the driver re-uses the DesignWare core functions to
+	  implement the driver.
+
+config PCIE_TEGRA194
+	tristate "NVIDIA Tegra194 (and later) PCIe controller"
+	depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PHY_TEGRA194_P2U
+	help
+	  Say Y here if you want support for the DesignWare core based PCIe
+	  host controller found in the NVIDIA Tegra194 SoC.
+
+config PCIE_UNIPHIER
+	bool "Socionext UniPhier PCIe controllers"
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	depends on OF && HAS_IOMEM
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on UniPhier SoCs.
+	  This driver supports LD20 and PXs3 SoCs.
+
+config PCIE_AL
+	bool "Amazon Annapurna Labs PCIe controller"
+	depends on OF && (ARM64 || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here to enable support for Amazon's Annapurna Labs PCIe
+	  controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
+	  core plus Annapurna Labs proprietary hardware wrappers. This is
+	  required only for DT-based platforms. ACPI platforms with the
+	  Annapurna Labs PCIe controller don't need to enable this.
+
+config PCIE_ASR1901
+	bool "ASR1901 PCIe controller"
+	depends on CPU_ASR1901 || COMPILE_TEST
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  ASR1901 SoCs. The PCIe controller on ASR1901 is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+endmenu
diff --git a/marvell/linux/drivers/pci/controller/dwc/Makefile b/marvell/linux/drivers/pci/controller/dwc/Makefile
new file mode 100644
index 0000000..e0812f8
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
+obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
+obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_ASR1901) += pcie-kestrel.o
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pcie-al.o
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+endif
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-dra7xx.c b/marvell/linux/drivers/pci/controller/dwc/pci-dra7xx.c
new file mode 100644
index 0000000..4234ddb
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pci-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
+#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
+#define	ERR_SYS						BIT(0)
+#define	ERR_FATAL					BIT(1)
+#define	ERR_NONFATAL					BIT(2)
+#define	ERR_COR						BIT(3)
+#define	ERR_AXI						BIT(4)
+#define	ERR_ECRC					BIT(5)
+#define	PME_TURN_OFF					BIT(8)
+#define	PME_TO_ACK					BIT(9)
+#define	PM_PME						BIT(10)
+#define	LINK_REQ_RST					BIT(11)
+#define	LINK_UP_EVT					BIT(12)
+#define	CFG_BME_EVT					BIT(13)
+#define	CFG_MSE_EVT					BIT(14)
+#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
+#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
+#define	INTA						BIT(0)
+#define	INTB						BIT(1)
+#define	INTC						BIT(2)
+#define	INTD						BIT(3)
+#define	MSI						BIT(4)
+#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
+#define	DEVICE_TYPE_EP					0x0
+#define	DEVICE_TYPE_LEG_EP				0x1
+#define	DEVICE_TYPE_RC					0x4
+
+#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
+#define	LTSSM_EN					0x1
+
+#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
+#define	LINK_UP						BIT(16)
+#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF
+
+#define EXP_CAP_ID_OFFSET				0x70
+
+#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
+#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128
+
+#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
+#define MSI_REQ_GRANT					BIT(0)
+#define MSI_VECTOR_SHIFT				7
+
+#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
+#define PCIE_B1C0_MODE_SEL				BIT(2)
+#define PCIE_B0_B1_TSYNCEN				BIT(0)
+
+struct dra7xx_pcie {
+	struct dw_pcie		*pci;
+	void __iomem		*base;		/* DT ti_conf */
+	int			phy_count;	/* DT phy-names count */
+	struct phy		**phy;
+	int			link_gen;
+	struct irq_domain	*irq_domain;
+	enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+	enum dw_pcie_device_mode mode;
+	u32 b1co_mode_sel_mask;
+};
+
+#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+	return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+				      u32 value)
+{
+	writel(value, pcie->base + offset);
+}
+
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
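+	/* Only the low 28 bits of the CPU address are passed through to the bus. */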
+	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
+static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+	return !!(reg & LINK_UP);
+}
+
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	u32 reg;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg &= ~LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 reg;
+	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(dev, "link is already up\n");
+		return 0;
+	}
+
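+	/*
+	 * For Gen1 operation, cap both the advertised (LNKCAP) and the
+	 * target (LNKCTL2) link speed fields at 2.5 GT/s.
+	 */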
+	if (dra7xx->link_gen == 1) {
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+			     4, &reg);
+		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCAP, 4, reg);
+		}
+
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+			     2, &reg);
+		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCTL2, 2, reg);
+		}
+	}
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg |= LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+	return 0;
+}
+
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+			   LEG_EP_INTERRUPTS | MSI);
+
+	dra7xx_pcie_writel(dra7xx,
+			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+			   MSI | LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+			   INTERRUPTS);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+			   INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+	dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
+static int dra7xx_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	dw_pcie_setup_rc(pp);
+
+	dra7xx_pcie_establish_link(pci);
+	dw_pcie_wait_for_link(pci);
+	dw_pcie_msi_init(pp);
+	dra7xx_pcie_enable_interrupts(dra7xx);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+	.host_init = dra7xx_pcie_host_init,
+};
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = dra7xx_pcie_intx_map,
+	.xlate = pci_irqd_intx_xlate,
+};
+
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node =  of_get_next_child(node, NULL);
+
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+						   &intx_domain_ops, pp);
+	of_node_put(pcie_intc_node);
+	if (!dra7xx->irq_domain) {
+		dev_err(dev, "Failed to get a INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+{
+	struct dra7xx_pcie *dra7xx = arg;
+	struct dw_pcie *pci = dra7xx->pci;
+	struct pcie_port *pp = &pci->pp;
+	unsigned long reg;
+	u32 virq, bit;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+
+	switch (reg) {
+	case MSI:
+		dw_handle_msi_irq(pp);
+		break;
+	case INTA:
+	case INTB:
+	case INTC:
+	case INTD:
+		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
+			virq = irq_find_mapping(dra7xx->irq_domain, bit);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+		break;
+	}
+
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+	struct dra7xx_pcie *dra7xx = arg;
+	struct dw_pcie *pci = dra7xx->pci;
+	struct device *dev = pci->dev;
+	struct dw_pcie_ep *ep = &pci->ep;
+	u32 reg;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+	if (reg & ERR_SYS)
+		dev_dbg(dev, "System Error\n");
+
+	if (reg & ERR_FATAL)
+		dev_dbg(dev, "Fatal Error\n");
+
+	if (reg & ERR_NONFATAL)
+		dev_dbg(dev, "Non Fatal Error\n");
+
+	if (reg & ERR_COR)
+		dev_dbg(dev, "Correctable Error\n");
+
+	if (reg & ERR_AXI)
+		dev_dbg(dev, "AXI tag lookup fatal Error\n");
+
+	if (reg & ERR_ECRC)
+		dev_dbg(dev, "ECRC Error\n");
+
+	if (reg & PME_TURN_OFF)
+		dev_dbg(dev,
+			"Power Management Event Turn-Off message received\n");
+
+	if (reg & PME_TO_ACK)
+		dev_dbg(dev,
+			"Power Management Turn-Off Ack message received\n");
+
+	if (reg & PM_PME)
+		dev_dbg(dev, "PM Power Management Event message received\n");
+
+	if (reg & LINK_REQ_RST)
+		dev_dbg(dev, "Link Request Reset\n");
+
+	if (reg & LINK_UP_EVT) {
+		if (dra7xx->mode == DW_PCIE_EP_TYPE)
+			dw_pcie_ep_linkup(ep);
+		dev_dbg(dev, "Link-up state change\n");
+	}
+
+	if (reg & CFG_BME_EVT)
+		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
+
+	if (reg & CFG_MSE_EVT)
+		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
+
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+	return IRQ_HANDLED;
+}
+
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+	mdelay(1);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+				      u8 interrupt_num)
+{
+	u32 reg;
+
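+	/* The 0-based MSI vector number sits above the request/grant bit. */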
+	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+	reg |= MSI_REQ_GRANT;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				 enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dra7xx_pcie_raise_legacy_irq(dra7xx);
+		break;
+	case PCI_EPC_IRQ_MSI:
+		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+		break;
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static const struct pci_epc_features dra7xx_pcie_epc_features = {
+	.linkup_notifier = true,
+	.msi_capable = true,
+	.msix_capable = false,
+};
+
+static const struct pci_epc_features*
+dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
+{
+	return &dra7xx_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dra7xx_pcie_ep_init,
+	.raise_irq = dra7xx_pcie_raise_irq,
+	.get_features = dra7xx_pcie_get_features,
+};
+
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+				     struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dra7xx->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+				       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie *pci = dra7xx->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	struct resource *res;
+
+	pp->irq = platform_get_irq(pdev, 1);
+	if (pp->irq < 0) {
+		dev_err(dev, "missing IRQ resource\n");
+		return pp->irq;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       "dra7-pcie-msi",	dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		return ret;
+	}
+
+	ret = dra7xx_pcie_init_irq_domain(pp);
+	if (ret < 0)
+		return ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	pp->ops = &dra7xx_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+	.start_link = dra7xx_pcie_establish_link,
+	.stop_link = dra7xx_pcie_stop_link,
+	.link_up = dra7xx_pcie_link_up,
+};
+
+static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
+{
+	int phy_count = dra7xx->phy_count;
+
+	while (phy_count--) {
+		phy_power_off(dra7xx->phy[phy_count]);
+		phy_exit(dra7xx->phy[phy_count]);
+	}
+}
+
+static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
+{
+	int phy_count = dra7xx->phy_count;
+	int ret;
+	int i;
+
+	for (i = 0; i < phy_count; i++) {
+		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_init(dra7xx->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(dra7xx->phy[i]);
+		if (ret < 0) {
+			phy_exit(dra7xx->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(dra7xx->phy[i]);
+		phy_exit(dra7xx->phy[i]);
+	}
+
+	return ret;
+}
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
+	.b1co_mode_sel_mask = BIT(2),
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
+	.b1co_mode_sel_mask = GENMASK(3, 2),
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
+	.b1co_mode_sel_mask = BIT(2),
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
+	.b1co_mode_sel_mask = GENMASK(3, 2),
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+	{
+		.compatible = "ti,dra7-pcie",
+		.data = &dra7xx_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra7-pcie-ep",
+		.data = &dra7xx_pcie_ep_of_data,
+	},
+	{
+		.compatible = "ti,dra746-pcie-rc",
+		.data = &dra746_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra726-pcie-rc",
+		.data = &dra726_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra746-pcie-ep",
+		.data = &dra746_pcie_ep_of_data,
+	},
+	{
+		.compatible = "ti,dra726-pcie-ep",
+		.data = &dra726_pcie_ep_of_data,
+	},
+	{},
+};
+
+/*
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dev: device of the DRA7xx PCIe controller the workaround applies to
+ *
+ * Accesses to the PCIe slave port that are not 32-bit aligned will result
+ * in an incorrect mapping to the TLP Address and Byte Enable fields.
+ * Therefore, byte and half-word accesses to byte offsets 0x1, 0x2, and 0x3
+ * are not possible.
+ *
+ * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
+{
+	int ret;
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args args;
+	struct regmap *regmap;
+
+	regmap = syscon_regmap_lookup_by_phandle(np,
+						 "ti,syscon-unaligned-access");
+	if (IS_ERR(regmap)) {
+		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+		return -EINVAL;
+	}
+
+	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+					       2, 0, &args);
+	if (ret) {
+		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+				 args.args[1]);
+	if (ret)
+		dev_err(dev, "failed to enable unaligned access\n");
+
+	of_node_put(args.np);
+
+	return ret;
+}
+
+static int dra7xx_pcie_configure_two_lane(struct device *dev,
+					  u32 b1co_mode_sel_mask)
+{
+	struct device_node *np = dev->of_node;
+	struct regmap *pcie_syscon;
+	unsigned int pcie_reg;
+	u32 mask;
+	u32 val;
+
+	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+	if (IS_ERR(pcie_syscon)) {
+		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
+				       &pcie_reg)) {
+		dev_err(dev, "couldn't get lane selection reg offset\n");
+		return -EINVAL;
+	}
+
+	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
+	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
+	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
+
+	return 0;
+}
+
+static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+{
+	u32 reg;
+	int ret;
+	int irq;
+	int i;
+	int phy_count;
+	struct phy **phy;
+	struct device_link **link;
+	void __iomem *base;
+	struct resource *res;
+	struct dw_pcie *pci;
+	struct dra7xx_pcie *dra7xx;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	char name[10];
+	struct gpio_desc *reset;
+	const struct of_device_id *match;
+	const struct dra7xx_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+	u32 b1co_mode_sel_mask;
+
+	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dra7xx_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
+	b1co_mode_sel_mask = data->b1co_mode_sel_mask;
+
+	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+	if (!dra7xx)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "missing IRQ resource: %d\n", irq);
+		return irq;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
+	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!base)
+		return -ENOMEM;
+
+	phy_count = of_property_count_strings(np, "phy-names");
+	if (phy_count < 0) {
+		dev_err(dev, "unable to find the strings\n");
+		return phy_count;
+	}
+
+	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	for (i = 0; i < phy_count; i++) {
+		snprintf(name, sizeof(name), "pcie-phy%d", i);
+		phy[i] = devm_phy_get(dev, name);
+		if (IS_ERR(phy[i]))
+			return PTR_ERR(phy[i]);
+
+		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+		if (!link[i]) {
+			ret = -EINVAL;
+			goto err_link;
+		}
+	}
+
+	dra7xx->base = base;
+	dra7xx->phy = phy;
+	dra7xx->pci = pci;
+	dra7xx->phy_count = phy_count;
+
+	if (phy_count == 2) {
+		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
+		if (ret < 0)
+			dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
+	}
+
+	ret = dra7xx_pcie_enable_phy(dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, dra7xx);
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
+	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
+	if (IS_ERR(reset)) {
+		ret = PTR_ERR(reset);
+		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+		goto err_gpio;
+	}
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg &= ~LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
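+	/* A missing or out-of-range "max-link-speed" falls back to Gen2. */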
+	dra7xx->link_gen = of_pci_get_max_link_speed(np);
+	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
+		dra7xx->link_gen = 2;
+
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
+			ret = -ENODEV;
+			goto err_gpio;
+		}
+
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_RC);
+
+		ret = dra7xx_pcie_unaligned_memaccess(dev);
+		if (ret)
+			dev_err(dev, "WA for Errata i870 not applied\n");
+
+		ret = dra7xx_add_pcie_port(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
+			ret = -ENODEV;
+			goto err_gpio;
+		}
+
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_EP);
+
+		ret = dra7xx_pcie_unaligned_memaccess(dev);
+		if (ret)
+			goto err_gpio;
+
+		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+	dra7xx->mode = mode;
+
+	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		goto err_gpio;
+	}
+
+	return 0;
+
+err_gpio:
+	pm_runtime_put(dev);
+
+err_get_sync:
+	pm_runtime_disable(dev);
+	dra7xx_pcie_disable_phy(dra7xx);
+
+err_link:
+	while (--i >= 0)
+		device_link_del(link[i]);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	struct dw_pcie *pci = dra7xx->pci;
+	u32 val;
+
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
+	/* clear MSE */
+	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+	val &= ~PCI_COMMAND_MEMORY;
+	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+	return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	struct dw_pcie *pci = dra7xx->pci;
+	u32 val;
+
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
+	/* set MSE */
+	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+	val |= PCI_COMMAND_MEMORY;
+	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+	return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+
+	dra7xx_pcie_disable_phy(dra7xx);
+
+	return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dra7xx_pcie_enable_phy(dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	int ret;
+
+	dra7xx_pcie_stop_link(dra7xx->pci);
+
+	ret = pm_runtime_put_sync(dev);
+	if (ret < 0)
+		dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+	pm_runtime_disable(dev);
+	dra7xx_pcie_disable_phy(dra7xx);
+}
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+				      dra7xx_pcie_resume_noirq)
+};
+
+static struct platform_driver dra7xx_pcie_driver = {
+	.driver = {
+		.name	= "dra7-pcie",
+		.of_match_table = of_dra7xx_pcie_match,
+		.suppress_bind_attrs = true,
+		.pm	= &dra7xx_pcie_pm_ops,
+	},
+	.shutdown = dra7xx_pcie_shutdown,
+};
+builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-exynos.c b/marvell/linux/drivers/pci/controller/dwc/pci-exynos.c
new file mode 100644
index 0000000..14a6ba4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-exynos.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x)	dev_get_drvdata((x)->dev)
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE			0x000
+#define IRQ_INTA_ASSERT			BIT(0)
+#define IRQ_INTB_ASSERT			BIT(2)
+#define IRQ_INTC_ASSERT			BIT(4)
+#define IRQ_INTD_ASSERT			BIT(6)
+#define PCIE_IRQ_LEVEL			0x004
+#define PCIE_IRQ_SPECIAL		0x008
+#define PCIE_IRQ_EN_PULSE		0x00c
+#define PCIE_IRQ_EN_LEVEL		0x010
+#define IRQ_MSI_ENABLE			BIT(2)
+#define PCIE_IRQ_EN_SPECIAL		0x014
+#define PCIE_PWR_RESET			0x018
+#define PCIE_CORE_RESET			0x01c
+#define PCIE_CORE_RESET_ENABLE		BIT(0)
+#define PCIE_STICKY_RESET		0x020
+#define PCIE_NONSTICKY_RESET		0x024
+#define PCIE_APP_INIT_RESET		0x028
+#define PCIE_APP_LTSSM_ENABLE		0x02c
+#define PCIE_ELBI_RDLH_LINKUP		0x064
+#define PCIE_ELBI_LTSSM_ENABLE		0x1
+#define PCIE_ELBI_SLV_AWMISC		0x11c
+#define PCIE_ELBI_SLV_ARMISC		0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
+
+struct exynos_pcie_mem_res {
+	void __iomem *elbi_base;   /* DT 0th resource: PCIe CTRL */
+};
+
+struct exynos_pcie_clk_res {
+	struct clk *clk;
+	struct clk *bus_clk;
+};
+
+struct exynos_pcie {
+	struct dw_pcie			*pci;
+	struct exynos_pcie_mem_res	*mem_res;
+	struct exynos_pcie_clk_res	*clk_res;
+	const struct exynos_pcie_ops	*ops;
+	int				reset_gpio;
+
+	struct phy			*phy;
+};
+
+struct exynos_pcie_ops {
+	int (*get_mem_resources)(struct platform_device *pdev,
+			struct exynos_pcie *ep);
+	int (*get_clk_resources)(struct exynos_pcie *ep);
+	int (*init_clk_resources)(struct exynos_pcie *ep);
+	void (*deinit_clk_resources)(struct exynos_pcie *ep);
+};
+
+static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
+					     struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+	struct resource *res;
+
+	ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
+	if (!ep->mem_res)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ep->mem_res->elbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ep->mem_res->elbi_base))
+		return PTR_ERR(ep->mem_res->elbi_base);
+
+	return 0;
+}
+
+static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+
+	ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
+	if (!ep->clk_res)
+		return -ENOMEM;
+
+	ep->clk_res->clk = devm_clk_get(dev, "pcie");
+	if (IS_ERR(ep->clk_res->clk)) {
+		dev_err(dev, "Failed to get pcie rc clock\n");
+		return PTR_ERR(ep->clk_res->clk);
+	}
+
+	ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
+	if (IS_ERR(ep->clk_res->bus_clk)) {
+		dev_err(dev, "Failed to get pcie bus clock\n");
+		return PTR_ERR(ep->clk_res->bus_clk);
+	}
+
+	return 0;
+}
+
+static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	ret = clk_prepare_enable(ep->clk_res->clk);
+	if (ret) {
+		dev_err(dev, "cannot enable pcie rc clock");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(ep->clk_res->bus_clk);
+	if (ret) {
+		dev_err(dev, "cannot enable pcie bus clock");
+		goto err_bus_clk;
+	}
+
+	return 0;
+
+err_bus_clk:
+	clk_disable_unprepare(ep->clk_res->clk);
+
+	return ret;
+}
+
+static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+{
+	clk_disable_unprepare(ep->clk_res->bus_clk);
+	clk_disable_unprepare(ep->clk_res->clk);
+}
+
+static const struct exynos_pcie_ops exynos5440_pcie_ops = {
+	.get_mem_resources	= exynos5440_pcie_get_mem_resources,
+	.get_clk_resources	= exynos5440_pcie_get_clk_resources,
+	.init_clk_resources	= exynos5440_pcie_init_clk_resources,
+	.deinit_clk_resources	= exynos5440_pcie_deinit_clk_resources,
+};
+
+static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
+{
+	writel(val, base + reg);
+}
+
+static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
+{
+	return readl(base + reg);
+}
+
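+/*
+ * The PCIE_ELBI_SLV_DBI_ENABLE sideband bit in AWMISC/ARMISC steers AXI
+ * slave writes/reads to the DBI registers; the own-config accessors below
+ * toggle it around each dw_pcie_read()/dw_pcie_write().
+ */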
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+	if (on)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+	if (on)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+}
+
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+	val &= ~PCIE_CORE_RESET_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+	val |= PCIE_CORE_RESET_ENABLE;
+
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
+}
+
+static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+
+	if (ep->reset_gpio >= 0)
+		devm_gpio_request_one(dev, ep->reset_gpio,
+				GPIOF_OUT_INIT_HIGH, "RESET");
+}
+
+static int exynos_pcie_establish_link(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(dev, "Link already up\n");
+		return 0;
+	}
+
+	exynos_pcie_assert_core_reset(ep);
+
+	phy_reset(ep->phy);
+
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1,
+			PCIE_PWR_RESET);
+
+	phy_power_on(ep->phy);
+	phy_init(ep->phy);
+
+	exynos_pcie_deassert_core_reset(ep);
+	dw_pcie_setup_rc(pp);
+	exynos_pcie_assert_reset(ep);
+
+	/* assert LTSSM enable */
+	exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+			  PCIE_APP_LTSSM_ENABLE);
+
+	/* check if the link is up or not */
+	if (!dw_pcie_wait_for_link(pci))
+		return 0;
+
+	phy_power_off(ep->phy);
+	return -ETIMEDOUT;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
+}
+
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	/* enable INTX interrupt */
+	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+		IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+	struct exynos_pcie *ep = arg;
+
+	exynos_pcie_clear_irq_pulse(ep);
+	return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	u32 val;
+
+	dw_pcie_msi_init(pp);
+
+	/* enable MSI interrupt */
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
+	val |= IRQ_MSI_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
+}
+
+static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
+{
+	exynos_pcie_enable_irq_pulse(ep);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		exynos_pcie_msi_init(ep);
+}
+
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+				u32 reg, size_t size)
+{
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	u32 val;
+
+	exynos_pcie_sideband_dbi_r_mode(ep, true);
+	dw_pcie_read(base + reg, size, &val);
+	exynos_pcie_sideband_dbi_r_mode(ep, false);
+	return val;
+}
+
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				  u32 reg, size_t size, u32 val)
+{
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+	exynos_pcie_sideband_dbi_w_mode(ep, true);
+	dw_pcie_write(base + reg, size, val);
+	exynos_pcie_sideband_dbi_w_mode(ep, false);
+}
+
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+				u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	int ret;
+
+	exynos_pcie_sideband_dbi_r_mode(ep, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	exynos_pcie_sideband_dbi_r_mode(ep, false);
+	return ret;
+}
+
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+				u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	int ret;
+
+	exynos_pcie_sideband_dbi_w_mode(ep, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	exynos_pcie_sideband_dbi_w_mode(ep, false);
+	return ret;
+}
+
+static int exynos_pcie_link_up(struct dw_pcie *pci)
+{
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+	if (val == PCIE_ELBI_LTSSM_ENABLE)
+		return 1;
+
+	return 0;
+}
+
+static int exynos_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+	exynos_pcie_establish_link(ep);
+	exynos_pcie_enable_interrupts(ep);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
+	.rd_own_conf = exynos_pcie_rd_own_conf,
+	.wr_own_conf = exynos_pcie_wr_own_conf,
+	.host_init = exynos_pcie_host_init,
+};
+
+static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
+				       struct platform_device *pdev)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 1);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return pp->irq;
+	}
+	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+				IRQF_SHARED, "exynos-pcie", ep);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		return ret;
+	}
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "failed to get msi irq\n");
+			return pp->msi_irq;
+		}
+	}
+
+	pp->ops = &exynos_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.read_dbi = exynos_pcie_read_dbi,
+	.write_dbi = exynos_pcie_write_dbi,
+	.link_up = exynos_pcie_link_up,
+};
+
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct exynos_pcie *ep;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	ep->pci = pci;
+	ep->ops = (const struct exynos_pcie_ops *)
+		of_device_get_match_data(dev);
+
+	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+	ep->phy = devm_of_phy_get(dev, np, NULL);
+	if (IS_ERR(ep->phy)) {
+		if (PTR_ERR(ep->phy) != -ENODEV)
+			return PTR_ERR(ep->phy);
+
+		ep->phy = NULL;
+	}
+
+	if (ep->ops && ep->ops->get_mem_resources) {
+		ret = ep->ops->get_mem_resources(pdev, ep);
+		if (ret)
+			return ret;
+	}
+
+	if (ep->ops && ep->ops->get_clk_resources &&
+			ep->ops->init_clk_resources) {
+		ret = ep->ops->get_clk_resources(ep);
+		if (ret)
+			return ret;
+		ret = ep->ops->init_clk_resources(ep);
+		if (ret)
+			return ret;
+	}
+
+	platform_set_drvdata(pdev, ep);
+
+	ret = exynos_add_pcie_port(ep, pdev);
+	if (ret < 0)
+		goto fail_probe;
+
+	return 0;
+
+fail_probe:
+	phy_exit(ep->phy);
+
+	if (ep->ops && ep->ops->deinit_clk_resources)
+		ep->ops->deinit_clk_resources(ep);
+	return ret;
+}
+
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+	struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+	if (ep->ops && ep->ops->deinit_clk_resources)
+		ep->ops->deinit_clk_resources(ep);
+
+	return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+	{
+		.compatible = "samsung,exynos5440-pcie",
+		.data = &exynos5440_pcie_ops
+	},
+	{},
+};
+
+static struct platform_driver exynos_pcie_driver = {
+	.remove		= __exit_p(exynos_pcie_remove),
+	.driver = {
+		.name	= "exynos-pcie",
+		.of_match_table = exynos_pcie_of_match,
+	},
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+static int __init exynos_pcie_init(void)
+{
+	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(exynos_pcie_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-imx6.c b/marvell/linux/drivers/pci/controller/dwc/pci-imx6.c
new file mode 100644
index 0000000..30c259f
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-imx6.c
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ *		http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-designware.h"
+
+#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
+#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
+#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000
+
+#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
+
+enum imx6_pcie_variants {
+	IMX6Q,
+	IMX6SX,
+	IMX6QP,
+	IMX7D,
+	IMX8MQ,
+};
+
+#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
+#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
+
+struct imx6_pcie_drvdata {
+	enum imx6_pcie_variants variant;
+	u32 flags;
+	int dbi_length;
+};
+
+struct imx6_pcie {
+	struct dw_pcie		*pci;
+	int			reset_gpio;
+	bool			gpio_active_high;
+	struct clk		*pcie_bus;
+	struct clk		*pcie_phy;
+	struct clk		*pcie_inbound_axi;
+	struct clk		*pcie;
+	struct clk		*pcie_aux;
+	struct regmap		*iomuxc_gpr;
+	u32			controller_id;
+	struct reset_control	*pciephy_reset;
+	struct reset_control	*apps_reset;
+	struct reset_control	*turnoff_reset;
+	u32			tx_deemph_gen1;
+	u32			tx_deemph_gen2_3p5db;
+	u32			tx_deemph_gen2_6db;
+	u32			tx_swing_full;
+	u32			tx_swing_low;
+	int			link_gen;
+	struct regulator	*vpcie;
+	void __iomem		*phy_base;
+
+	/* power domain for pcie */
+	struct device		*pd_pcie;
+	/* power domain for pcie phy */
+	struct device		*pd_pcie_phy;
+	const struct imx6_pcie_drvdata *drvdata;
+};
+
+/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
+
+/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_IMX6_MSI_CAP			0x50
+#define PCIE_RC_LCR				0x7c
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf
+
+#define PCIE_RC_LCSR				0x80
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
+#define PCIE_PHY_CTRL_WR		BIT(18)
+#define PCIE_PHY_CTRL_RD		BIT(19)
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK		BIT(16)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD			0x10
+#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
+#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
+#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
+#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
+#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
+#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)
+
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)
+
+/* iMX7 PCIe PHY registers */
+#define PCIE_PHY_CMN_REG4		0x14
+/* These are probably the bits that *aren't* DCC_FB_EN */
+#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29
+
+#define PCIE_PHY_CMN_REG15	        0x54
+#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
+#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
+#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)
+
+#define PCIE_PHY_CMN_REG24		0x90
+#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
+#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)
+
+#define PCIE_PHY_CMN_REG26		0x98
+#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)
+
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	bool val;
+	u32 max_iterations = 10;
+	u32 wait_counter = 0;
+
+	do {
+		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+			PCIE_PHY_STAT_ACK;
+		wait_counter++;
+
+		if (val == exp_val)
+			return 0;
+
+		udelay(1);
+	} while (wait_counter < max_iterations);
+
+	return -ETIMEDOUT;
+}
+
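+/*
+ * Address phase of the PHY control bus handshake: present the address on
+ * PCIE_PHY_CTRL, pulse CAP_ADR to latch it, and wait for the ack bit in
+ * PCIE_PHY_STAT to assert and then de-assert.
+ */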
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	u32 val;
+	int ret;
+
+	val = PCIE_PHY_CTRL_DATA(addr);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+	val |= PCIE_PHY_CTRL_CAP_ADR;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	val = PCIE_PHY_CTRL_DATA(addr);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+	return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	u32 phy_ctl;
+	int ret;
+
+	ret = pcie_phy_wait_ack(imx6_pcie, addr);
+	if (ret)
+		return ret;
+
+	/* assert Read signal */
+	phy_ctl = PCIE_PHY_CTRL_RD;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
+
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
+
+	/* deassert Read signal */
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
+
+	return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	u32 var;
+	int ret;
+
+	/* write and capture the address */
+	ret = pcie_phy_wait_ack(imx6_pcie, addr);
+	if (ret)
+		return ret;
+
+	var = PCIE_PHY_CTRL_DATA(data);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* capture data */
+	var |= PCIE_PHY_CTRL_CAP_DAT;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	/* deassert cap data */
+	var = PCIE_PHY_CTRL_DATA(data);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* wait for ack de-assertion */
+	ret = pcie_phy_poll_ack(imx6_pcie, false);
+	if (ret)
+		return ret;
+
+	/* assert wr signal */
+	var = PCIE_PHY_CTRL_WR;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* wait for ack */
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	/* deassert wr signal */
+	var = PCIE_PHY_CTRL_DATA(data);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* wait for ack de-assertion */
+	ret = pcie_phy_poll_ack(imx6_pcie, false);
+	if (ret)
+		return ret;
+
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
+
+	return 0;
+}
+
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+{
+	u16 tmp;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+		return;
+
+	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+
+	usleep_range(2000, 3000);
+
+	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+}
+
+#ifdef CONFIG_ARM
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+		unsigned int fsr, struct pt_regs *regs)
+{
+	unsigned long pc = instruction_pointer(regs);
+	unsigned long instr = *(unsigned long *)pc;
+	int reg = (instr >> 12) & 15;
+
+	/*
+	 * If the instruction being executed was a read,
+	 * make it look like it read all-ones.
+	 */
+	if ((instr & 0x0c100000) == 0x04100000) {
+		unsigned long val;
+
+		if (instr & 0x00400000)
+			val = 255;
+		else
+			val = -1;
+
+		regs->uregs[reg] = val;
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
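+	/*
+	 * This encoding matches the ARM "extra load" forms (LDRH/LDRSH/
+	 * LDRSB); fault them to all-ones as well so reads of non-existent
+	 * devices behave like ordinary PCI master aborts.
+	 */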
+	if ((instr & 0x0e100090) == 0x00100090) {
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
+	return 1;
+}
+#endif
+
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct device_link *link;
+
+	/* Do nothing when in a single power domain */
+	if (dev->pm_domain)
+		return 0;
+
+	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+	if (IS_ERR(imx6_pcie->pd_pcie))
+		return PTR_ERR(imx6_pcie->pd_pcie);
+	/* Do nothing when power domain missing */
+	if (!imx6_pcie->pd_pcie)
+		return 0;
+	link = device_link_add(dev, imx6_pcie->pd_pcie,
+			DL_FLAG_STATELESS |
+			DL_FLAG_PM_RUNTIME |
+			DL_FLAG_RPM_ACTIVE);
+	if (!link) {
+		dev_err(dev, "Failed to add device_link to pcie pd.\n");
+		return -EINVAL;
+	}
+
+	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+	if (IS_ERR(imx6_pcie->pd_pcie_phy))
+		return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+			DL_FLAG_STATELESS |
+			DL_FLAG_PM_RUNTIME |
+			DL_FLAG_RPM_ACTIVE);
+	if (!link) {
+		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+	struct device *dev = imx6_pcie->pci->dev;
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX7D:
+	case IMX8MQ:
+		reset_control_assert(imx6_pcie->pciephy_reset);
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+		/* Force PCIe PHY reset */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST,
+				   IMX6Q_GPR1_PCIE_SW_RST);
+		break;
+	case IMX6Q:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+		break;
+	}
+
+	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+		int ret = regulator_disable(imx6_pcie->vpcie);
+
+		if (ret)
+			dev_err(dev, "failed to disable vpcie regulator: %d\n",
+				ret);
+	}
+
+	/* Some boards don't have PCIe reset GPIO. */
+	if (gpio_is_valid(imx6_pcie->reset_gpio))
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					imx6_pcie->gpio_active_high);
+}
+
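+/*
+ * The i.MX8MQ has two PCIe controllers, each with its own block of GPR
+ * registers: controller 0 is configured through IOMUXC_GPR14 and
+ * controller 1 through IOMUXC_GPR16.
+ */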
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
+	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	unsigned int offset;
+	int ret = 0;
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+		if (ret) {
+			dev_err(dev, "unable to enable pcie_axi clock\n");
+			break;
+		}
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+		break;
+	case IMX6QP:		/* FALLTHROUGH */
+	case IMX6Q:
+		/* power up core phy and enable ref clock */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+		/*
+		 * The async reset input needs the ref clock to synchronize
+		 * internally. If the ref clock only comes up after reset,
+		 * the internally synchronized reset pulse is too short to
+		 * meet the requirement, so add a ~10us delay here.
+		 */
+		usleep_range(10, 100);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+		break;
+	case IMX7D:
+		break;
+	case IMX8MQ:
+		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+		if (ret) {
+			dev_err(dev, "unable to enable pcie_aux clock\n");
+			break;
+		}
+
+		offset = imx6_pcie_grp_offset(imx6_pcie);
+		/*
+		 * Drive the CLK_REQ override value low and enable the
+		 * override to make sure that REF_CLK is turned on.
+		 */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+				   0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+		break;
+	}
+
+	return ret;
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+	u32 val;
+	struct device *dev = imx6_pcie->pci->dev;
+
+	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+				     IOMUXC_GPR22, val,
+				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+				     PHY_PLL_LOCK_WAIT_TIMEOUT))
+		dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+		ret = regulator_enable(imx6_pcie->vpcie);
+		if (ret) {
+			dev_err(dev, "failed to enable vpcie regulator: %d\n",
+				ret);
+			return;
+		}
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie_phy clock\n");
+		goto err_pcie_phy;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie_bus clock\n");
+		goto err_pcie_bus;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie clock\n");
+		goto err_pcie;
+	}
+
+	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie ref clock\n");
+		goto err_ref_clk;
+	}
+
+	/* allow the clocks to stabilize */
+	usleep_range(200, 500);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ:
+		reset_control_deassert(imx6_pcie->pciephy_reset);
+		break;
+	case IMX7D:
+		reset_control_deassert(imx6_pcie->pciephy_reset);
+
+		/*
+		 * Workaround for ERR010728, failure of PCI-e PLL VCO to
+		 * oscillate, especially when cold.  This turns off "Duty-cycle
+		 * Corrector" and other mysterious undocumented things.
+		 */
+		if (likely(imx6_pcie->phy_base)) {
+			/* De-assert DCC_FB_EN */
+			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
+			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
+			/* Assert RX_EQS and RX_EQS_SEL */
+			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
+				| PCIE_PHY_CMN_REG24_RX_EQ,
+			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
+			/* Assert ATT_MODE */
+			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
+			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
+		} else {
+			dev_warn(dev, "Unable to apply ERR010728 workaround. Is the fsl,imx7d-pcie-phy phandle missing from DT?\n");
+		}
+
+		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+		usleep_range(200, 500);
+		break;
+	case IMX6Q:		/* Nothing to do */
+		break;
+	}
+
+	/* Some boards don't have PCIe reset GPIO. */
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		msleep(100);
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					!imx6_pcie->gpio_active_high);
+		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+		msleep(100);
+	}
+
+	return;
+
+err_ref_clk:
+	clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+	clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+	clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+		ret = regulator_disable(imx6_pcie->vpcie);
+		if (ret)
+			dev_err(dev, "failed to disable vpcie regulator: %d\n",
+				ret);
+	}
+}
+
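+/*
+ * The DEVICE_TYPE field in GPR12 selects whether the controller operates
+ * as a Root Complex or an Endpoint; the second i.MX8MQ controller has a
+ * dedicated field of its own. This driver always programs Root Port mode.
+ */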
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+	unsigned int mask, val;
+
+	if (imx6_pcie->drvdata->variant == IMX8MQ &&
+	    imx6_pcie->controller_id == 1) {
+		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+				    PCI_EXP_TYPE_ROOT_PORT);
+	} else {
+		mask = IMX6Q_GPR12_DEVICE_TYPE;
+		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+				  PCI_EXP_TYPE_ROOT_PORT);
+	}
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ:
+		/*
+		 * TODO: Currently this code assumes an external
+		 * oscillator is being used.
+		 */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   imx6_pcie_grp_offset(imx6_pcie),
+				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
+				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
+		break;
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+				   IMX6SX_GPR12_PCIE_RX_EQ_2);
+		/* FALLTHROUGH */
+	default:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+		/* configure constant input signal to the pcie ctrl and phy */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
+				   imx6_pcie->tx_deemph_gen1 << 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+				   imx6_pcie->tx_deemph_gen2_6db << 12);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_FULL,
+				   imx6_pcie->tx_swing_full << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_LOW,
+				   imx6_pcie->tx_swing_low << 25);
+		break;
+	}
+
+	imx6_pcie_configure_type(imx6_pcie);
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+	int mult, div;
+	u16 val;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+		return 0;
+
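+	/*
+	 * The MPLL has to produce the same internal rate for every supported
+	 * reference clock: 100 MHz x 25 = 2.5 GHz, and 200 MHz / 2 x 25 =
+	 * 2.5 GHz, presumably matching what the default 125 MHz settings
+	 * already produce.
+	 */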
+	switch (phy_rate) {
+	case 125000000:
+		/*
+		 * The default settings of the MPLL are for a 125MHz input
+		 * clock, so no need to reconfigure anything in that case.
+		 */
+		return 0;
+	case 100000000:
+		mult = 25;
+		div = 0;
+		break;
+	case 200000000:
+		mult = 25;
+		div = 1;
+		break;
+	default:
+		dev_err(imx6_pcie->pci->dev,
+			"Unsupported PHY reference clock rate %lu\n", phy_rate);
+		return -EINVAL;
+	}
+
+	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+	val |= PCIE_PHY_ATEOVRD_EN;
+	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+	return 0;
+}
+
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 tmp;
+	unsigned int retries;
+
+	for (retries = 0; retries < 200; retries++) {
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		/* Test if the speed change finished. */
+		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+			return 0;
+		usleep_range(100, 1000);
+	}
+
+	dev_err(dev, "Speed change timeout\n");
+	return -ETIMEDOUT;
+}
+
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6Q:
+	case IMX6SX:
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2,
+				   IMX6Q_GPR12_PCIE_CTL_2);
+		break;
+	case IMX7D:
+	case IMX8MQ:
+		reset_control_deassert(imx6_pcie->apps_reset);
+		break;
+	}
+}
+
+static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 tmp;
+	int ret;
+
+	/*
+	 * Force Gen1 operation when starting the link.  In case the link is
+	 * started in Gen2 mode, there is a possibility the devices on the
+	 * bus will not be detected at all.  This happens with PCIe switches.
+	 */
+	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
+	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
+	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+
+	/* Start LTSSM. */
+	imx6_pcie_ltssm_enable(dev);
+
+	ret = dw_pcie_wait_for_link(pci);
+	if (ret)
+		goto err_reset_phy;
+
+	if (imx6_pcie->link_gen == 2) {
+		/* Allow Gen2 mode after the link is up. */
+		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
+		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+
+		/*
+		 * Start Directed Speed Change so the best possible
+		 * speed both link partners support can be negotiated.
+		 */
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		tmp |= PORT_LOGIC_SPEED_CHANGE;
+		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+		if (imx6_pcie->drvdata->flags &
+		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+			/*
+			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+			 * from the i.MX6 family when no link speed transition
+			 * occurs, i.e. the link comes up at Gen1 and simply
+			 * stays there. In that case the bit is not cleared
+			 * by hardware, which would make the wait below
+			 * report a false failure.
+			 */
+
+			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+			if (ret) {
+				dev_err(dev, "Failed to bring link up!\n");
+				goto err_reset_phy;
+			}
+		}
+
+		/* Make sure link training is finished as well! */
+		ret = dw_pcie_wait_for_link(pci);
+		if (ret) {
+			dev_err(dev, "Failed to bring link up!\n");
+			goto err_reset_phy;
+		}
+	} else {
+		dev_info(dev, "Link: Gen2 disabled\n");
+	}
+
+	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
+	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+	return 0;
+
+err_reset_phy:
+	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
+	imx6_pcie_reset_phy(imx6_pcie);
+	return ret;
+}
+
+static int imx6_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+	imx6_pcie_assert_core_reset(imx6_pcie);
+	imx6_pcie_init_phy(imx6_pcie);
+	imx6_pcie_deassert_core_reset(imx6_pcie);
+	imx6_setup_phy_mpll(imx6_pcie);
+	dw_pcie_setup_rc(pp);
+	imx6_pcie_establish_link(imx6_pcie);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+	.host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+			      struct platform_device *pdev)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq <= 0) {
+			dev_err(dev, "failed to get MSI irq\n");
+			return -ENODEV;
+		}
+	}
+
+	pp->ops = &imx6_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	/* No special ops needed, but pcie-designware still expects this struct */
+};
+
+#ifdef CONFIG_PM_SLEEP
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0);
+		break;
+	case IMX7D:
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
+	default:
+		dev_err(dev, "ltssm_disable not supported\n");
+	}
+}
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+	struct device *dev = imx6_pcie->pci->dev;
+
+	/* Some variants have a turnoff reset in DT */
+	if (imx6_pcie->turnoff_reset) {
+		reset_control_assert(imx6_pcie->turnoff_reset);
+		reset_control_deassert(imx6_pcie->turnoff_reset);
+		goto pm_turnoff_sleep;
+	}
+
+	/* Others poke directly at IOMUXC registers */
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+		break;
+	default:
+		dev_err(dev, "PME_Turn_Off not implemented\n");
+		return;
+	}
+
+	/*
+	 * Components with an upstream port must respond to
+	 * PME_Turn_Off with PME_TO_Ack but we can't check.
+	 *
+	 * The standard recommends a 1-10ms timeout after which to
+	 * proceed anyway as if acks were received.
+	 */
+pm_turnoff_sleep:
+	usleep_range(1000, 10000);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+	clk_disable_unprepare(imx6_pcie->pcie);
+	clk_disable_unprepare(imx6_pcie->pcie_phy);
+	clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+		break;
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+		break;
+	case IMX8MQ:
+		clk_disable_unprepare(imx6_pcie->pcie_aux);
+		break;
+	default:
+		break;
+	}
+}
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+		return 0;
+
+	imx6_pcie_pm_turnoff(imx6_pcie);
+	imx6_pcie_clk_disable(imx6_pcie);
+	imx6_pcie_ltssm_disable(dev);
+
+	return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+	int ret;
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct pcie_port *pp = &imx6_pcie->pci->pp;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+		return 0;
+
+	imx6_pcie_assert_core_reset(imx6_pcie);
+	imx6_pcie_init_phy(imx6_pcie);
+	imx6_pcie_deassert_core_reset(imx6_pcie);
+	dw_pcie_setup_rc(pp);
+
+	ret = imx6_pcie_establish_link(imx6_pcie);
+	if (ret < 0)
+		dev_info(dev, "pcie link is down after resume.\n");
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+				      imx6_pcie_resume_noirq)
+};
+
+static int imx6_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct imx6_pcie *imx6_pcie;
+	struct device_node *np;
+	struct resource *dbi_base;
+	struct device_node *node = dev->of_node;
+	int ret;
+	u16 val;
+
+	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
+	if (!imx6_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	imx6_pcie->pci = pci;
+	imx6_pcie->drvdata = of_device_get_match_data(dev);
+
+	/* Find the PHY if one is defined; only imx7d uses it */
+	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+	if (np) {
+		struct resource res;
+
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret) {
+			dev_err(dev, "Unable to map PCIe PHY\n");
+			return ret;
+		}
+		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
+		if (IS_ERR(imx6_pcie->phy_base)) {
+			dev_err(dev, "Unable to map PCIe PHY\n");
+			return PTR_ERR(imx6_pcie->phy_base);
+		}
+	}
+
+	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	/* Fetch GPIOs */
+	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+	imx6_pcie->gpio_active_high = of_property_read_bool(node,
+						"reset-gpio-active-high");
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
+				imx6_pcie->gpio_active_high ?
+					GPIOF_OUT_INIT_HIGH :
+					GPIOF_OUT_INIT_LOW,
+				"PCIe reset");
+		if (ret) {
+			dev_err(dev, "unable to get reset gpio\n");
+			return ret;
+		}
+	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+		return imx6_pcie->reset_gpio;
+	}
+
+	/* Fetch clocks */
+	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
+	if (IS_ERR(imx6_pcie->pcie_phy)) {
+		dev_err(dev, "pcie_phy clock source missing or invalid\n");
+		return PTR_ERR(imx6_pcie->pcie_phy);
+	}
+
+	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
+	if (IS_ERR(imx6_pcie->pcie_bus)) {
+		dev_err(dev, "pcie_bus clock source missing or invalid\n");
+		return PTR_ERR(imx6_pcie->pcie_bus);
+	}
+
+	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
+	if (IS_ERR(imx6_pcie->pcie)) {
+		dev_err(dev, "pcie clock source missing or invalid\n");
+		return PTR_ERR(imx6_pcie->pcie);
+	}
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
+							   "pcie_inbound_axi");
+		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
+			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+		}
+		break;
+	case IMX8MQ:
+		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+		if (IS_ERR(imx6_pcie->pcie_aux)) {
+			dev_err(dev, "pcie_aux clock source missing or invalid\n");
+			return PTR_ERR(imx6_pcie->pcie_aux);
+		}
+		/* fall through */
+	case IMX7D:
+		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+			imx6_pcie->controller_id = 1;
+
+		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
+									    "pciephy");
+		if (IS_ERR(imx6_pcie->pciephy_reset)) {
+			dev_err(dev, "Failed to get PCIEPHY reset control\n");
+			return PTR_ERR(imx6_pcie->pciephy_reset);
+		}
+
+		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+									 "apps");
+		if (IS_ERR(imx6_pcie->apps_reset)) {
+			dev_err(dev, "Failed to get PCIE APPS reset control\n");
+			return PTR_ERR(imx6_pcie->apps_reset);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Grab turnoff reset */
+	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+	if (IS_ERR(imx6_pcie->turnoff_reset)) {
+		dev_err(dev, "Failed to get TURNOFF reset control\n");
+		return PTR_ERR(imx6_pcie->turnoff_reset);
+	}
+
+	/* Grab GPR config register range */
+	imx6_pcie->iomuxc_gpr =
+		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+		dev_err(dev, "unable to find iomuxc registers\n");
+		return PTR_ERR(imx6_pcie->iomuxc_gpr);
+	}
+
+	/* Grab PCIe PHY Tx Settings */
+	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
+				 &imx6_pcie->tx_deemph_gen1))
+		imx6_pcie->tx_deemph_gen1 = 0;
+
+	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
+				 &imx6_pcie->tx_deemph_gen2_3p5db))
+		imx6_pcie->tx_deemph_gen2_3p5db = 0;
+
+	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
+				 &imx6_pcie->tx_deemph_gen2_6db))
+		imx6_pcie->tx_deemph_gen2_6db = 20;
+
+	if (of_property_read_u32(node, "fsl,tx-swing-full",
+				 &imx6_pcie->tx_swing_full))
+		imx6_pcie->tx_swing_full = 127;
+
+	if (of_property_read_u32(node, "fsl,tx-swing-low",
+				 &imx6_pcie->tx_swing_low))
+		imx6_pcie->tx_swing_low = 127;
+
+	/* Limit link speed */
+	ret = of_property_read_u32(node, "fsl,max-link-speed",
+				   &imx6_pcie->link_gen);
+	if (ret)
+		imx6_pcie->link_gen = 1;
+
+	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+	if (IS_ERR(imx6_pcie->vpcie)) {
+		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+			return PTR_ERR(imx6_pcie->vpcie);
+		imx6_pcie->vpcie = NULL;
+	}
+
+	platform_set_drvdata(pdev, imx6_pcie);
+
+	ret = imx6_pcie_attach_pd(dev);
+	if (ret)
+		return ret;
+
+	ret = imx6_add_pcie_port(imx6_pcie, pdev);
+	if (ret < 0)
+		return ret;
+
+	if (pci_msi_enabled()) {
+		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+					PCI_MSI_FLAGS);
+		val |= PCI_MSI_FLAGS_ENABLE;
+		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+				   val);
+	}
+
+	return 0;
+}
+
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+	/* bring down link, so bootloader gets clean state in case of reboot */
+	imx6_pcie_assert_core_reset(imx6_pcie);
+}
+
+static const struct imx6_pcie_drvdata drvdata[] = {
+	[IMX6Q] = {
+		.variant = IMX6Q,
+		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+		.dbi_length = 0x200,
+	},
+	[IMX6SX] = {
+		.variant = IMX6SX,
+		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+	},
+	[IMX6QP] = {
+		.variant = IMX6QP,
+		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+	},
+	[IMX7D] = {
+		.variant = IMX7D,
+		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+	},
+	[IMX8MQ] = {
+		.variant = IMX8MQ,
+	},
+};
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
+	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
+	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
+	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
+	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+	{},
+};
+
+static struct platform_driver imx6_pcie_driver = {
+	.driver = {
+		.name	= "imx6q-pcie",
+		.of_match_table = imx6_pcie_of_match,
+		.suppress_bind_attrs = true,
+		.pm = &imx6_pcie_pm_ops,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.probe    = imx6_pcie_probe,
+	.shutdown = imx6_pcie_shutdown,
+};
+
+static void imx6_pcie_quirk(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct pcie_port *pp = bus->sysdata;
+
+	/* Bus parent is the PCI bridge; its parent is this platform driver's device */
+	if (!bus->dev.parent || !bus->dev.parent->parent)
+		return;
+
+	/* Make sure we only quirk devices associated with this driver */
+	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+		return;
+
+	if (bus->number == pp->root_bus_nr) {
+		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+		/*
+		 * Limit config length to avoid the kernel reading beyond
+		 * the register set and causing an abort on i.MX 6Quad
+		 */
+		if (imx6_pcie->drvdata->dbi_length) {
+			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
+					dev->cfg_size);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+
+static int __init imx6_pcie_init(void)
+{
+#ifdef CONFIG_ARM
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, imx6_pcie_of_match);
+	if (!np)
+		return -ENODEV;
+	of_node_put(np);
+
+	/*
+	 * Since probe() can be deferred we need to make sure that
+	 * hook_fault_code is not called after __init memory is freed
+	 * by the kernel. Since imx6q_pcie_abort_handler() does not
+	 * touch any driver state, we can install the handler here
+	 * without risking it accessing uninitialized memory.
+	 */
+	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+			"external abort on non-linefetch");
+#endif
+
+	return platform_driver_register(&imx6_pcie_driver);
+}
+device_initcall(imx6_pcie_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-keystone.c b/marvell/linux/drivers/pci/controller/dwc/pci-keystone.c
new file mode 100644
index 0000000..b28d3c4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-keystone.c
@@ -0,0 +1,1485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PCIE_VENDORID_MASK	0xffff
+#define PCIE_DEVICEID_SHIFT	16
+
+/* Application registers */
+#define PID				0x000
+#define RTL				GENMASK(15, 11)
+#define RTL_SHIFT			11
+#define AM6_PCI_PG1_RTL_VER		0x15
+
+#define CMD_STATUS			0x004
+#define LTSSM_EN_VAL		        BIT(0)
+#define OB_XLAT_EN_VAL		        BIT(1)
+#define DBI_CS2				BIT(5)
+
+#define CFG_SETUP			0x008
+#define CFG_BUS(x)			(((x) & 0xff) << 16)
+#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
+#define CFG_FUNC(x)			((x) & 0x7)
+#define CFG_TYPE1			BIT(24)
+
+#define OB_SIZE				0x030
+#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
+#define OB_ENABLEN			BIT(0)
+#define OB_WIN_SIZE			8	/* 8MB */
+
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET			0x64
+#define PCIE_EP_IRQ_CLR			0x68
+#define INT_ENABLE			BIT(0)
+
+/* IRQ register defines */
+#define IRQ_EOI				0x050
+
+#define MSI_IRQ				0x054
+#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
+#define MSI_IRQ_OFFSET			4
+
+#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
+#define INTx_EN				BIT(0)
+
+#define ERR_IRQ_STATUS			0x1c4
+#define ERR_IRQ_ENABLE_SET		0x1c8
+#define ERR_AER				BIT(5)	/* ECRC error */
+#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
+#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
+#define ERR_CORR			BIT(3)	/* Correctable error */
+#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
+#define ERR_FATAL			BIT(1)	/* Fatal error */
+#define ERR_SYS				BIT(0)	/* System error */
+#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
+					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK			0xb008
+#define PCIE_RC_K2E			0xb009
+#define PCIE_RC_K2L			0xb00a
+#define PCIE_RC_K2G			0xb00b
+
+#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)
+
+#define EP				0x0
+#define LEG_EP				0x1
+#define RC				0x2
+
+#define EXP_CAP_ID_OFFSET		0x70
+
+#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK	0x3
+#define AM654_WIN_SIZE			SZ_64K
+
+#define APP_ADDR_SPACE_0		(16 * SZ_1K)
+
+#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
+
+#define PCI_DEVICE_ID_TI_AM654X		0xb00c
+
+struct ks_pcie_of_data {
+	enum dw_pcie_device_mode mode;
+	const struct dw_pcie_host_ops *host_ops;
+	const struct dw_pcie_ep_ops *ep_ops;
+	unsigned int version;
+};
+
+struct keystone_pcie {
+	struct dw_pcie		*pci;
+	/* PCI Device ID */
+	u32			device_id;
+	int			legacy_host_irqs[PCI_NUM_INTX];
+	struct			device_node *legacy_intc_np;
+
+	int			msi_host_irq;
+	int			num_lanes;
+	u32			num_viewport;
+	struct phy		**phy;
+	struct device_link	**link;
+	struct			device_node *msi_intc_np;
+	struct irq_domain	*legacy_irq_domain;
+	struct device_node	*np;
+
+	/* Application register space */
+	void __iomem		*va_app_base;	/* DT 1st resource */
+	struct resource		app;
+	bool			is_am6;
+};
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+	return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+			       u32 val)
+{
+	writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
+{
+	struct pcie_port *pp  = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	u32 irq = data->hwirq;
+	struct dw_pcie *pci;
+	u32 reg_offset;
+	u32 bit_pos;
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
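+	/*
+	 * MSI vectors are spread across eight 4-bit status registers:
+	 * vector N is tracked by bit (N / 8) of MSI_IRQ_STATUS(N % 8).
+	 */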
+	reg_offset = irq % 8;
+	bit_pos = irq >> 3;
+
+	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+			   BIT(bit_pos));
+	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	struct dw_pcie *pci;
+	u64 msi_target;
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
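+	/*
+	 * Every MSI is an inbound write to the MSI_IRQ register in the
+	 * application register space; scan_bus points BAR0 at this region
+	 * so such writes from endpoints can reach it.
+	 */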
+	msi_target = ks_pcie->app.start + MSI_IRQ;
+	msg->address_lo = lower_32_bits(msi_target);
+	msg->address_hi = upper_32_bits(msi_target);
+	msg->data = data->hwirq;
+
+	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+				    const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	u32 irq = data->hwirq;
+	struct dw_pcie *pci;
+	unsigned long flags;
+	u32 reg_offset;
+	u32 bit_pos;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
+	reg_offset = irq % 8;
+	bit_pos = irq >> 3;
+
+	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
+			   BIT(bit_pos));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void ks_pcie_msi_unmask(struct irq_data *data)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	u32 irq = data->hwirq;
+	struct dw_pcie *pci;
+	unsigned long flags;
+	u32 reg_offset;
+	u32 bit_pos;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
+	reg_offset = irq % 8;
+	bit_pos = irq >> 3;
+
+	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
+			   BIT(bit_pos));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip ks_pcie_msi_irq_chip = {
+	.name = "KEYSTONE-PCI-MSI",
+	.irq_ack = ks_pcie_msi_irq_ack,
+	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+	.irq_set_affinity = ks_pcie_msi_set_affinity,
+	.irq_mask = ks_pcie_msi_mask,
+	.irq_unmask = ks_pcie_msi_unmask,
+};
+
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
+{
+	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+	return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+				      int offset)
+{
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 pending;
+	int virq;
+
+	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
+
+	if (BIT(0) & pending) {
+		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
+		generic_handle_irq(virq);
+	}
+
+	/* EOI the INTx interrupt */
+	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
+{
+	return 0;
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+	u32 reg;
+	struct device *dev = ks_pcie->pci->dev;
+
+	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+	if (!reg)
+		return IRQ_NONE;
+
+	if (reg & ERR_SYS)
+		dev_err(dev, "System Error\n");
+
+	if (reg & ERR_FATAL)
+		dev_err(dev, "Fatal Error\n");
+
+	if (reg & ERR_NONFATAL)
+		dev_dbg(dev, "Non Fatal Error\n");
+
+	if (reg & ERR_CORR)
+		dev_dbg(dev, "Correctable Error\n");
+
+	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
+		dev_err(dev, "AXI tag lookup fatal Error\n");
+
+	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
+		dev_err(dev, "ECRC Error\n");
+
+	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+	return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+	.name = "Keystone-PCI-Legacy-IRQ",
+	.irq_ack = ks_pcie_ack_legacy_irq,
+	.irq_mask = ks_pcie_mask_legacy_irq,
+	.irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+				       unsigned int irq,
+				       irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+	.map = ks_pcie_init_legacy_irq_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read
+ * the status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val |= DBI_CS2;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+	do {
+		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	} while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read
+ * the status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val &= ~DBI_CS2;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+	do {
+		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	} while (val & DBI_CS2);
+}
+
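+/*
+ * With DBI_CS2 set, accesses to the BAR registers are routed to the
+ * shadow BAR mask registers, which define each BAR's size and enable
+ * state, rather than to the BARs themselves.
+ */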
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+	u32 num_viewport = ks_pcie->num_viewport;
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	u64 start = pp->mem->start;
+	u64 end = pp->mem->end;
+	int i;
+
+	/* Disable BARs for inbound access */
+	ks_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+	ks_pcie_clear_dbi_mode(ks_pcie);
+
+	if (ks_pcie->is_am6)
+		return;
+
+	val = ilog2(OB_WIN_SIZE);
+	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
+	for (i = 0; i < num_viewport && (start < end); i++) {
+		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+				   lower_32_bits(start) | OB_ENABLEN);
+		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+				   upper_32_bits(start));
+		start += OB_WIN_SIZE * SZ_1M;
+	}
+
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val |= OB_XLAT_EN_VAL;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
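+/*
+ * Config accesses to devices below the root port all go through a single
+ * remapped config window: CFG_SETUP selects the target bus/device/function
+ * and whether a Type 0 or Type 1 config transaction is generated, then the
+ * access itself is a plain read or write of the shared window.
+ */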
+static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 reg;
+
+	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+		CFG_FUNC(PCI_FUNC(devfn));
+	if (bus->parent->number != pp->root_bus_nr)
+		reg |= CFG_TYPE1;
+	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+	return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+}
+
+static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 reg;
+
+	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+		CFG_FUNC(PCI_FUNC(devfn));
+	if (bus->parent->number != pp->root_bus_nr)
+		reg |= CFG_TYPE1;
+	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+	return dw_pcie_write(pp->va_cfg0_base + where, size, val);
+}
+
+/**
+ * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for the MSI_IRQ register.
+ */
+static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+	/* Configure and set up BAR0 */
+	ks_pcie_set_dbi_mode(ks_pcie);
+
+	/* Enable BAR0 */
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+	ks_pcie_clear_dbi_mode(ks_pcie);
+
+	/*
+	 * For BAR0, just setting bus address for inbound writes (MSI) should
+	 * be sufficient.  Use physical address to avoid any conflicts.
+	 */
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+}
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+	val &= PORT_LOGIC_LTSSM_STATE_MASK;
+	return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 val;
+
+	/* Disable Link training */
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val &= ~LTSSM_EN_VAL;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_start_link(struct dw_pcie *pci)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 val;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_dbg(dev, "link is already up\n");
+		return 0;
+	}
+
+	/* Initiate Link Training */
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+	return 0;
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct keystone_pcie *ks_pcie;
+	struct device *bridge_dev;
+	struct pci_dev *bridge;
+	u32 val;
+
+	static const struct pci_device_id rc_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+	static const struct pci_device_id am6_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+
+	if (pci_is_root_bus(bus))
+		bridge = dev;
+
+	/* look for the host bridge */
+	while (!pci_is_root_bus(bus)) {
+		bridge = bus->self;
+		bus = bus->parent;
+	}
+
+	if (!bridge)
+		return;
+
+	/*
+	 * The Keystone PCI controller has a h/w limitation of 256 bytes
+	 * maximum read request size. It can't handle anything larger than
+	 * this, so force this limit on all downstream devices.
+	 */
+	if (pci_match_id(rc_pci_devids, bridge)) {
+		if (pcie_get_readrq(dev) > 256) {
+			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
+			pcie_set_readrq(dev, 256);
+		}
+	}
+
+	/*
+	 * Memory transactions fail with PCI controller in AM654 PG1.0
+	 * when MRRS is set to more than 128 bytes. Force the MRRS to
+	 * 128 bytes in all downstream devices.
+	 */
+	if (pci_match_id(am6_pci_devids, bridge)) {
+		bridge_dev = pci_get_host_bridge_device(dev);
+		if (!bridge_dev || !bridge_dev->parent)
+			return;
+
+		ks_pcie = dev_get_drvdata(bridge_dev->parent);
+		if (!ks_pcie)
+			return;
+
+		val = ks_pcie_app_readl(ks_pcie, PID);
+		val &= RTL;
+		val >>= RTL_SHIFT;
+		if (val != AM6_PCI_PG1_RTL_VER)
+			return;
+
+		if (pcie_get_readrq(dev) > 128) {
+			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+			pcie_set_readrq(dev, 128);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+	unsigned int irq = desc->irq_data.hwirq;
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	u32 offset = irq - ks_pcie->msi_host_irq;
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 vector, virq, reg, pos;
+
+	dev_dbg(dev, "%s, irq %d\n", __func__, irq);
+
+	/*
+	 * The chained irq handler installation would have replaced the
+	 * normal interrupt driver handler, so we need to take care of the
+	 * mask/unmask and ack operations.
+	 */
+	chained_irq_enter(chip, desc);
+
+	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+	/*
+	 * MSI0 status bits 0-3 correspond to vectors 0, 8, 16 and 24;
+	 * MSI1 status bits correspond to vectors 1, 9, 17 and 25; and
+	 * so forth.
+	 */
+	for (pos = 0; pos < 4; pos++) {
+		if (!(reg & BIT(pos)))
+			continue;
+
+		vector = offset + (pos << 3);
+		virq = irq_linear_revmap(pp->irq_domain, vector);
+		dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
+			virq);
+		generic_handle_irq(virq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @irq: IRQ line for legacy interrupts
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * takes care of interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation would have replaced the
+	 * normal interrupt driver handler, so we need to take care of the
+	 * mask/unmask and ack operations.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+	chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
+{
+	struct device *dev = ks_pcie->pci->dev;
+	struct device_node *np = ks_pcie->np;
+	struct device_node *intc_np;
+	struct irq_data *irq_data;
+	int irq_count, irq, ret, i;
+
+	if (!IS_ENABLED(CONFIG_PCI_MSI))
+		return 0;
+
+	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+	if (!intc_np) {
+		if (ks_pcie->is_am6)
+			return 0;
+		dev_warn(dev, "msi-interrupt-controller node is absent\n");
+		return -EINVAL;
+	}
+
+	irq_count = of_irq_count(intc_np);
+	if (!irq_count) {
+		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		irq = irq_of_parse_and_map(intc_np, i);
+		if (!irq) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		if (!ks_pcie->msi_host_irq) {
+			irq_data = irq_get_irq_data(irq);
+			if (!irq_data) {
+				ret = -EINVAL;
+				goto err;
+			}
+			ks_pcie->msi_host_irq = irq_data->hwirq;
+		}
+
+		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+						 ks_pcie);
+	}
+
+	of_node_put(intc_np);
+	return 0;
+
+err:
+	of_node_put(intc_np);
+	return ret;
+}
+
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+	struct device *dev = ks_pcie->pci->dev;
+	struct irq_domain *legacy_irq_domain;
+	struct device_node *np = ks_pcie->np;
+	struct device_node *intc_np;
+	int irq_count, irq, ret = 0, i;
+
+	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+	if (!intc_np) {
+		/*
+		 * Since legacy interrupts are modeled as edge-interrupts in
+		 * AM6, keep it disabled for now.
+		 */
+		if (ks_pcie->is_am6)
+			return 0;
+		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+		return -EINVAL;
+	}
+
+	irq_count = of_irq_count(intc_np);
+	if (!irq_count) {
+		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		irq = irq_of_parse_and_map(intc_np, i);
+		if (!irq) {
+			ret = -EINVAL;
+			goto err;
+		}
+		ks_pcie->legacy_host_irqs[i] = irq;
+
+		irq_set_chained_handler_and_data(irq,
+						 ks_pcie_legacy_irq_handler,
+						 ks_pcie);
+	}
+
+	legacy_irq_domain =
+		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+				      &ks_pcie_legacy_irq_domain_ops, NULL);
+	if (!legacy_irq_domain) {
+		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+	for (i = 0; i < PCI_NUM_INTX; i++)
+		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+err:
+	of_node_put(intc_np);
+	return ret;
+}
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, the keystone host
+ * gets a bus error instead of returning 0xffffffff. This handler always
+ * returns 0 for this kind of fault.
+ */
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+			 struct pt_regs *regs)
+{
+	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+	if ((instr & 0x0e100090) == 0x00100090) {
+		int reg = (instr >> 12) & 15;
+
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+	}
+
+	return 0;
+}
+#endif
+
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+	int ret;
+	unsigned int id;
+	struct regmap *devctrl_regs;
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+	if (IS_ERR(devctrl_regs))
+		return PTR_ERR(devctrl_regs);
+
+	ret = regmap_read(devctrl_regs, 0, &id);
+	if (ret)
+		return ret;
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int __init ks_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	int ret;
+
+	ret = ks_pcie_config_legacy_irq(ks_pcie);
+	if (ret)
+		return ret;
+
+	ret = ks_pcie_config_msi_irq(ks_pcie);
+	if (ret)
+		return ret;
+
+	dw_pcie_setup_rc(pp);
+
+	ks_pcie_stop_link(pci);
+	ks_pcie_setup_rc_app_regs(ks_pcie);
+	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+			pci->dbi_base + PCI_IO_BASE);
+
+	ret = ks_pcie_init_id(ks_pcie);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_ARM
+	/*
+	 * PCIe access errors that result in OCP errors are caught by the
+	 * ARM core as "external aborts".
+	 */
+	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+			"Asynchronous external abort");
+#endif
+
+	ks_pcie_start_link(pci);
+	dw_pcie_wait_for_link(pci);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+	.rd_other_conf = ks_pcie_rd_other_conf,
+	.wr_other_conf = ks_pcie_wr_other_conf,
+	.host_init = ks_pcie_host_init,
+	.msi_host_init = ks_pcie_msi_host_init,
+	.scan_bus = ks_pcie_v3_65_scan_bus,
+};
+
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+	.host_init = ks_pcie_host_init,
+	.msi_host_init = ks_pcie_am654_msi_host_init,
+};
+
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
+{
+	struct keystone_pcie *ks_pcie = priv;
+
+	return ks_pcie_handle_error_irq(ks_pcie);
+}
+
+static int ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pp->va_cfg0_base))
+		return PTR_ERR(pp->va_cfg0_base);
+
+	pp->va_cfg1_base = pp->va_cfg0_base;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
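+/*
+ * On AM654 the shadow (DBI2) registers are not at a separate address;
+ * they are reached through the regular DBI window with DBI_CS2 set, so
+ * these accessors wrap each access in set/clear_dbi_mode.
+ */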
+static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
+				   u32 reg, size_t size)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 val;
+
+	ks_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_read(base + reg, size, &val);
+	ks_pcie_clear_dbi_mode(ks_pcie);
+	return val;
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+				     u32 reg, size_t size, u32 val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+	ks_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_write(base + reg, size, val);
+	ks_pcie_clear_dbi_mode(ks_pcie);
+}
+
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+	.start_link = ks_pcie_start_link,
+	.stop_link = ks_pcie_stop_link,
+	.link_up = ks_pcie_link_up,
+	.read_dbi2 = ks_pcie_am654_read_dbi2,
+	.write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	int flags;
+
+	ep->page_size = AM654_WIN_SIZE;
+	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
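+/*
+ * Legacy INTx from the endpoint side is generated by pulsing the EP IRQ
+ * set/clear registers: the set write raises the interrupt and the clear
+ * write, after a short delay, releases it again.
+ */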
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+	struct dw_pcie *pci = ks_pcie->pci;
+	u8 int_pin;
+
+	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+	if (int_pin == 0 || int_pin > 4)
+		return;
+
+	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+			   INT_ENABLE);
+	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+	mdelay(1);
+	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+			   INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				   enum pci_epc_irq_type type,
+				   u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		ks_pcie_am654_raise_legacy_irq(ks_pcie);
+		break;
+	case PCI_EPC_IRQ_MSI:
+		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+		break;
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = false,
+	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+	.bar_fixed_64bit = 1 << BAR_0,
+	.bar_fixed_size[2] = SZ_1M,
+	.bar_fixed_size[3] = SZ_64K,
+	.bar_fixed_size[4] = 256,
+	.bar_fixed_size[5] = SZ_1M,
+	.align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+	return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+	.ep_init = ks_pcie_am654_ep_init,
+	.raise_irq = ks_pcie_am654_raise_irq,
+	.get_features = &ks_pcie_am654_get_features,
+};
+
+static int ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = ks_pcie->pci;
+
+	ep = &pci->ep;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
+{
+	int num_lanes = ks_pcie->num_lanes;
+
+	while (num_lanes--) {
+		phy_power_off(ks_pcie->phy[num_lanes]);
+		phy_exit(ks_pcie->phy[num_lanes]);
+	}
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+	int i;
+	int ret;
+	int num_lanes = ks_pcie->num_lanes;
+
+	for (i = 0; i < num_lanes; i++) {
+		ret = phy_reset(ks_pcie->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_init(ks_pcie->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(ks_pcie->phy[i]);
+		if (ret < 0) {
+			phy_exit(ks_pcie->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(ks_pcie->phy[i]);
+		phy_exit(ks_pcie->phy[i]);
+	}
+
+	return ret;
+}
+
+static int ks_pcie_set_mode(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct regmap *syscon;
+	u32 val;
+	u32 mask;
+	int ret = 0;
+
+	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+	if (IS_ERR(syscon))
+		return 0;
+
+	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+	ret = regmap_update_bits(syscon, 0, mask, val);
+	if (ret) {
+		dev_err(dev, "failed to set pcie mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+				  enum dw_pcie_device_mode mode)
+{
+	struct device_node *np = dev->of_node;
+	struct regmap *syscon;
+	u32 val;
+	u32 mask;
+	int ret = 0;
+
+	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+	if (IS_ERR(syscon))
+		return 0;
+
+	mask = AM654_PCIE_DEV_TYPE_MASK;
+
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		val = RC;
+		break;
+	case DW_PCIE_EP_TYPE:
+		val = EP;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+		return -EINVAL;
+	}
+
+	ret = regmap_update_bits(syscon, 0, mask, val);
+	if (ret) {
+		dev_err(dev, "failed to set pcie mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
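+/*
+ * Clamp both the advertised supported speed (LNKCAP) and the target link
+ * speed (LNKCTL2) to the requested generation; the two speed fields share
+ * the same bit layout, so PCI_EXP_LNKCAP_SLS is used as the mask for both.
+ */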
+static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
+{
+	u32 val;
+
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
+	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+		val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+		val |= link_speed;
+		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
+				   val);
+	}
+
+	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
+	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+		val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+		val |= link_speed;
+		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
+				   val);
+	}
+
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+	.host_ops = &ks_pcie_host_ops,
+	.version = 0x365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+	.host_ops = &ks_pcie_am654_host_ops,
+	.mode = DW_PCIE_RC_TYPE,
+	.version = 0x490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+	.ep_ops = &ks_pcie_am654_ep_ops,
+	.mode = DW_PCIE_EP_TYPE,
+	.version = 0x490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+	{
+		.type = "pci",
+		.data = &ks_pcie_rc_of_data,
+		.compatible = "ti,keystone-pcie",
+	},
+	{
+		.data = &ks_pcie_am654_rc_of_data,
+		.compatible = "ti,am654-pcie-rc",
+	},
+	{
+		.data = &ks_pcie_am654_ep_of_data,
+		.compatible = "ti,am654-pcie-ep",
+	},
+	{ },
+};
+
+static int ks_pcie_probe(struct platform_device *pdev)
+{
+	const struct dw_pcie_host_ops *host_ops;
+	const struct dw_pcie_ep_ops *ep_ops;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	const struct ks_pcie_of_data *data;
+	const struct of_device_id *match;
+	enum dw_pcie_device_mode mode;
+	struct dw_pcie *pci;
+	struct keystone_pcie *ks_pcie;
+	struct device_link **link;
+	struct gpio_desc *gpiod;
+	void __iomem *atu_base;
+	struct resource *res;
+	unsigned int version;
+	void __iomem *base;
+	u32 num_viewport;
+	struct phy **phy;
+	int link_speed;
+	u32 num_lanes;
+	char name[10];
+	int ret;
+	int irq;
+	int i;
+
+	match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
+	data = (struct ks_pcie_of_data *)match->data;
+	if (!data)
+		return -EINVAL;
+
+	version = data->version;
+	host_ops = data->host_ops;
+	ep_ops = data->ep_ops;
+	mode = data->mode;
+
+	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
+	if (!ks_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ks_pcie->va_app_base))
+		return PTR_ERR(ks_pcie->va_app_base);
+
+	ks_pcie->app = *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+	base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+		ks_pcie->is_am6 = true;
+
+	pci->dbi_base = base;
+	pci->dbi_base2 = base;
+	pci->dev = dev;
+	pci->ops = &ks_pcie_dw_pcie_ops;
+	pci->version = version;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "missing IRQ resource: %d\n", irq);
+		return irq;
+	}
+
+	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+			  "ks-pcie-error-irq", ks_pcie);
+	if (ret < 0) {
+		dev_err(dev, "failed to request error IRQ %d\n",
+			irq);
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+	if (ret)
+		num_lanes = 1;
+
+	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	for (i = 0; i < num_lanes; i++) {
+		snprintf(name, sizeof(name), "pcie-phy%d", i);
+		phy[i] = devm_phy_optional_get(dev, name);
+		if (IS_ERR(phy[i])) {
+			ret = PTR_ERR(phy[i]);
+			goto err_link;
+		}
+
+		if (!phy[i])
+			continue;
+
+		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+		if (!link[i]) {
+			ret = -EINVAL;
+			goto err_link;
+		}
+	}
+
+	ks_pcie->np = np;
+	ks_pcie->pci = pci;
+	ks_pcie->link = link;
+	ks_pcie->num_lanes = num_lanes;
+	ks_pcie->phy = phy;
+
+	gpiod = devm_gpiod_get_optional(dev, "reset",
+					GPIOD_OUT_LOW);
+	if (IS_ERR(gpiod)) {
+		ret = PTR_ERR(gpiod);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get reset GPIO\n");
+		goto err_link;
+	}
+
+	/* Obtain references to the PHYs */
+	for (i = 0; i < num_lanes; i++)
+		phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
+	ret = ks_pcie_enable_phy(ks_pcie);
+
+	/* Release references to the PHYs */
+	for (i = 0; i < num_lanes; i++)
+		phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		goto err_link;
+	}
+
+	platform_set_drvdata(pdev, ks_pcie);
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
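+	/*
+	 * DWC cores 4.80a and newer (the AM654 IP is 4.90a) expose a
+	 * dedicated "atu" register space and support both RC and EP modes;
+	 * older Keystone parts are configured for RC mode only.
+	 */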
+	if (pci->version >= 0x480A) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+		atu_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(atu_base)) {
+			ret = PTR_ERR(atu_base);
+			goto err_get_sync;
+		}
+
+		pci->atu_base = atu_base;
+
+		ret = ks_pcie_am654_set_mode(dev, mode);
+		if (ret < 0)
+			goto err_get_sync;
+	} else {
+		ret = ks_pcie_set_mode(dev);
+		if (ret < 0)
+			goto err_get_sync;
+	}
+
+	link_speed = of_pci_get_max_link_speed(np);
+	if (link_speed < 0)
+		link_speed = 2;
+
+	ks_pcie_set_link_speed(pci, link_speed);
+
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+			ret = -ENODEV;
+			goto err_get_sync;
+		}
+
+		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+		if (ret < 0) {
+			dev_err(dev, "unable to read *num-viewport* property\n");
+			goto err_get_sync;
+		}
+
+		/*
+		 * "Power Sequencing and Reset Signal Timings" table in
+		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+		 * indicates PERST# should be deasserted after minimum of 100us
+		 * once REFCLK is stable. The REFCLK to the connector in RC
+		 * mode is selected while enabling the PHY. So deassert PERST#
+		 * after 100 us.
+		 */
+		if (gpiod) {
+			usleep_range(100, 200);
+			gpiod_set_value_cansleep(gpiod, 1);
+		}
+
+		ks_pcie->num_viewport = num_viewport;
+		pci->pp.ops = host_ops;
+		ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+		if (ret < 0)
+			goto err_get_sync;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+			ret = -ENODEV;
+			goto err_get_sync;
+		}
+
+		pci->ep.ops = ep_ops;
+		ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+		if (ret < 0)
+			goto err_get_sync;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+
+	ks_pcie_enable_error_irq(ks_pcie);
+
+	return 0;
+
+err_get_sync:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+	ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+	while (--i >= 0 && link[i])
+		device_link_del(link[i]);
+
+	return ret;
+}
+
+static int ks_pcie_remove(struct platform_device *pdev)
+{
+	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+	struct device_link **link = ks_pcie->link;
+	int num_lanes = ks_pcie->num_lanes;
+	struct device *dev = &pdev->dev;
+
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+	ks_pcie_disable_phy(ks_pcie);
+	while (num_lanes--)
+		device_link_del(link[num_lanes]);
+
+	return 0;
+}
+
+static struct platform_driver ks_pcie_driver = {
+	.probe  = ks_pcie_probe,
+	.remove = ks_pcie_remove,
+	.driver = {
+		.name	= "keystone-pcie",
+		.of_match_table = of_match_ptr(ks_pcie_of_match),
+	},
+};
+builtin_platform_driver(ks_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-layerscape-ep.c b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape-ep.c
new file mode 100644
index 0000000..ca9aa45
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2018 NXP Semiconductor.
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_DBI2_OFFSET		0x1000	/* DBI2 base address */
+
+struct ls_pcie_ep {
+	struct dw_pcie		*pci;
+};
+
+#define to_ls_pcie_ep(x)	dev_get_drvdata((x)->dev)
+
+static int ls_pcie_establish_link(struct dw_pcie *pci)
+{
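+	/*
+	 * The link is typically brought up by the bootloader on Layerscape,
+	 * so there is nothing to do when the core asks us to start it.
+	 */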
+	return 0;
+}
+
+static const struct dw_pcie_ops ls_pcie_ep_ops = {
+	.start_link = ls_pcie_establish_link,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+	{ .compatible = "fsl,ls-pcie-ep",},
+	{ },
+};
+
+static const struct pci_epc_features ls_pcie_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = false,
+	.bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+};
+
+static const struct pci_epc_features*
+ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+	return &ls_pcie_epc_features;
+}
+
+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				  enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	case PCI_EPC_IRQ_MSIX:
+		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+		return -EINVAL;
+	}
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = ls_pcie_ep_init,
+	.raise_irq = ls_pcie_ep_raise_irq,
+	.get_features = ls_pcie_ep_get_features,
+};
+
+static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
+					struct platform_device *pdev)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	int ret;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct ls_pcie_ep *pcie;
+	struct resource *dbi_base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
+	pci->dev = dev;
+	pci->ops = &ls_pcie_ep_ops;
+	pcie->pci = pci;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = ls_add_pcie_ep(pcie, pdev);
+
+	return ret;
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+	.driver = {
+		.name = "layerscape-pcie-ep",
+		.of_match_table = ls_pcie_ep_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-layerscape.c b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape.c
new file mode 100644
index 0000000..3a5fa26
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2014 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+/* PEX1/2 Misc Ports Status Register */
+#define SCFG_PEXMSCPORTSR(pex_idx)	(0x94 + (pex_idx) * 4)
+#define LTSSM_STATE_SHIFT	20
+#define LTSSM_STATE_MASK	0x3f
+#define LTSSM_PCIE_L0		0x11 /* L0 state */
+
+/* PEX Internal Configuration Registers */
+#define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR		0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING	0x9401 /* Forward error of non-posted request */
+
+#define PCIE_IATU_NUM		6
+
+struct ls_pcie_drvdata {
+	u32 lut_offset;
+	u32 ltssm_shift;
+	u32 lut_dbg;
+	const struct dw_pcie_host_ops *ops;
+	const struct dw_pcie_ops *dw_pcie_ops;
+};
+
+struct ls_pcie {
+	struct dw_pcie *pci;
+	void __iomem *lut;
+	struct regmap *scfg;
+	const struct ls_pcie_drvdata *drvdata;
+	int index;
+};
+
+#define to_ls_pcie(x)	dev_get_drvdata((x)->dev)
+
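+/*
+ * Each Layerscape controller instance is strapped as either RC or EP by
+ * the board's reset configuration word (RCW); the header type register
+ * reveals which personality this instance was given, and the host driver
+ * only binds to bridge (RC) instances.
+ */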
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	u32 header_type;
+
+	header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
+	header_type &= 0x7f;
+
+	return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+/* Clear multi-function bit */
+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+
+	iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
+}
+
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+	u32 val;
+	struct dw_pcie *pci = pcie->pci;
+
+	val = ioread32(pci->dbi_base + PCIE_STRFMR1);
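+	/* The 0xDFFFFFFF mask clears only bit 29 of STRFMR1 */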
+	val &= 0xDFFFFFFF;
+	iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
+}
+
+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
+{
+	int i;
+
+	for (i = 0; i < PCIE_IATU_NUM; i++)
+		dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
+}
+
+static int ls1021_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 state;
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+
+	if (!pcie->scfg)
+		return 0;
+
+	regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
+	state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
+
+	if (state < LTSSM_PCIE_L0)
+		return 0;
+
+	return 1;
+}
+
+static int ls_pcie_link_up(struct dw_pcie *pci)
+{
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+	u32 state;
+
+	state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
+		 pcie->drvdata->ltssm_shift) &
+		 LTSSM_STATE_MASK;
+
+	if (state < LTSSM_PCIE_L0)
+		return 0;
+
+	return 1;
+}
+
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+
+	iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
+static int ls_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+
+	/*
+	 * Disable outbound windows configured by the bootloader to avoid
+	 * one transaction hitting multiple outbound windows.
+	 * dw_pcie_setup_rc() will reconfigure the outbound windows.
+	 */
+	ls_pcie_disable_outbound_atus(pcie);
+	ls_pcie_fix_error_response(pcie);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	ls_pcie_clear_multifunction(pcie);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	ls_pcie_drop_msg_tlp(pcie);
+
+	dw_pcie_setup_rc(pp);
+
+	return 0;
+}
+
+static int ls1021_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 index[2];
+	int ret;
+
+	pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
+						     "fsl,pcie-scfg");
+	if (IS_ERR(pcie->scfg)) {
+		ret = PTR_ERR(pcie->scfg);
+		dev_err(dev, "No syscfg phandle specified\n");
+		pcie->scfg = NULL;
+		return ret;
+	}
+
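+	/*
+	 * "fsl,pcie-scfg" carries the syscon phandle plus a controller
+	 * index; the index selects which SCFG_PEXMSCPORTSR register
+	 * reports this port's LTSSM state.
+	 */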
+	if (of_property_read_u32_array(dev->of_node,
+				       "fsl,pcie-scfg", index, 2)) {
+		pcie->scfg = NULL;
+		return -EINVAL;
+	}
+	pcie->index = index[1];
+
+	return ls_pcie_host_init(pp);
+}
+
+static int ls_pcie_msi_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *msi_node;
+
+	/*
+	 * The MSI domain is set by the generic of_msi_configure().  This
+	 * .msi_host_init() function keeps us from doing the default MSI
+	 * domain setup in dw_pcie_host_init() and also enforces the
+	 * requirement that "msi-parent" exists.
+	 */
+	msi_node = of_parse_phandle(np, "msi-parent", 0);
+	if (!msi_node) {
+		dev_err(dev, "failed to find msi-parent\n");
+		return -EINVAL;
+	}
+
+	of_node_put(msi_node);
+	return 0;
+}
+
+static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
+	.host_init = ls1021_pcie_host_init,
+	.msi_host_init = ls_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
+	.host_init = ls_pcie_host_init,
+	.msi_host_init = ls_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
+	.link_up = ls1021_pcie_link_up,
+};
+
+static const struct dw_pcie_ops dw_ls_pcie_ops = {
+	.link_up = ls_pcie_link_up,
+};
+
+static const struct ls_pcie_drvdata ls1021_drvdata = {
+	.ops = &ls1021_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls1021_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls1043_drvdata = {
+	.lut_offset = 0x10000,
+	.ltssm_shift = 24,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls1046_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 24,
+	.lut_dbg = 0x407fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls2080_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 0,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls2088_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 0,
+	.lut_dbg = 0x407fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct of_device_id ls_pcie_of_match[] = {
+	{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
+	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
+	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
+	{ .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
+	{ },
+};
+
+static int __init ls_add_pcie_port(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	int ret;
+
+	pp->ops = pcie->drvdata->ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init ls_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct ls_pcie *pcie;
+	struct resource *dbi_base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pcie->drvdata = of_device_get_match_data(dev);
+
+	pci->dev = dev;
+	pci->ops = pcie->drvdata->dw_pcie_ops;
+
+	pcie->pci = pci;
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
+
+	if (!ls_pcie_is_bridge(pcie))
+		return -ENODEV;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = ls_add_pcie_port(pcie);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct platform_driver ls_pcie_driver = {
+	.driver = {
+		.name = "layerscape-pcie",
+		.of_match_table = ls_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-meson.c b/marvell/linux/drivers/pci/controller/dwc/pci-meson.c
new file mode 100644
index 0000000..8c9f887
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-meson.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amlogic MESON SoCs
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Yue Wang <yue.wang@amlogic.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
+
+/* External local bus interface registers */
+#define PLR_OFFSET			0x700
+#define PCIE_PORT_LINK_CTRL_OFF		(PLR_OFFSET + 0x10)
+#define FAST_LINK_MODE			BIT(7)
+#define LINK_CAPABLE_MASK		GENMASK(21, 16)
+#define LINK_CAPABLE_X1			BIT(16)
+
+#define PCIE_GEN2_CTRL_OFF		(PLR_OFFSET + 0x10c)
+#define NUM_OF_LANES_MASK		GENMASK(12, 8)
+#define NUM_OF_LANES_X1			BIT(8)
+#define DIRECT_SPEED_CHANGE		BIT(17)
+
+#define TYPE1_HDR_OFFSET		0x0
+#define PCIE_STATUS_COMMAND		(TYPE1_HDR_OFFSET + 0x04)
+#define PCI_IO_EN			BIT(0)
+#define PCI_MEM_SPACE_EN		BIT(1)
+#define PCI_BUS_MASTER_EN		BIT(2)
+
+#define PCIE_BASE_ADDR0			(TYPE1_HDR_OFFSET + 0x10)
+#define PCIE_BASE_ADDR1			(TYPE1_HDR_OFFSET + 0x14)
+
+#define PCIE_CAP_OFFSET			0x70
+#define PCIE_DEV_CTRL_DEV_STUS		(PCIE_CAP_OFFSET + 0x08)
+#define PCIE_CAP_MAX_PAYLOAD_MASK	GENMASK(7, 5)
+#define PCIE_CAP_MAX_PAYLOAD_SIZE(x)	((x) << 5)
+#define PCIE_CAP_MAX_READ_REQ_MASK	GENMASK(14, 12)
+#define PCIE_CAP_MAX_READ_REQ_SIZE(x)	((x) << 12)
+
+/* PCIe specific config registers */
+#define PCIE_CFG0			0x0
+#define APP_LTSSM_ENABLE		BIT(7)
+
+#define PCIE_CFG_STATUS12		0x30
+#define IS_SMLH_LINK_UP(x)		((x) & (1 << 6))
+#define IS_RDLH_LINK_UP(x)		((x) & (1 << 16))
+#define IS_LTSSM_UP(x)			((((x) >> 10) & 0x1f) == 0x11)
+
+#define PCIE_CFG_STATUS17		0x44
+#define PM_CURRENT_STATE(x)		(((x) >> 7) & 0x1)
+
+#define WAIT_LINKUP_TIMEOUT		4000
+#define PORT_CLK_RATE			100000000UL
+#define MAX_PAYLOAD_SIZE		256
+#define MAX_READ_REQ_SIZE		256
+#define MESON_PCIE_PHY_POWERUP		0x1c
+#define PCIE_RESET_DELAY		500
+#define PCIE_SHARED_RESET		1
+#define PCIE_NORMAL_RESET		0
+
+enum pcie_data_rate {
+	PCIE_GEN1,
+	PCIE_GEN2,
+	PCIE_GEN3,
+	PCIE_GEN4
+};
+
+struct meson_pcie_mem_res {
+	void __iomem *elbi_base;
+	void __iomem *cfg_base;
+	void __iomem *phy_base;
+};
+
+struct meson_pcie_clk_res {
+	struct clk *clk;
+	struct clk *mipi_gate;
+	struct clk *port_clk;
+	struct clk *general_clk;
+};
+
+struct meson_pcie_rc_reset {
+	struct reset_control *phy;
+	struct reset_control *port;
+	struct reset_control *apb;
+};
+
+struct meson_pcie {
+	struct dw_pcie pci;
+	struct meson_pcie_mem_res mem_res;
+	struct meson_pcie_clk_res clk_res;
+	struct meson_pcie_rc_reset mrst;
+	struct gpio_desc *reset_gpio;
+};
+
+static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
+						  const char *id,
+						  u32 reset_type)
+{
+	struct device *dev = mp->pci.dev;
+	struct reset_control *reset;
+
+	if (reset_type == PCIE_SHARED_RESET)
+		reset = devm_reset_control_get_shared(dev, id);
+	else
+		reset = devm_reset_control_get(dev, id);
+
+	return reset;
+}
+
+static int meson_pcie_get_resets(struct meson_pcie *mp)
+{
+	struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+	mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+	if (IS_ERR(mrst->phy))
+		return PTR_ERR(mrst->phy);
+	reset_control_deassert(mrst->phy);
+
+	mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
+	if (IS_ERR(mrst->port))
+		return PTR_ERR(mrst->port);
+	reset_control_deassert(mrst->port);
+
+	mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
+	if (IS_ERR(mrst->apb))
+		return PTR_ERR(mrst->apb);
+	reset_control_deassert(mrst->apb);
+
+	return 0;
+}
+
+static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
+					struct meson_pcie *mp,
+					const char *id)
+{
+	struct device *dev = mp->pci.dev;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+
+	return devm_ioremap_resource(dev, res);
+}
+
+static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
+					       struct meson_pcie *mp,
+					       const char *id)
+{
+	struct device *dev = mp->pci.dev;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+	if (!res) {
+		dev_err(dev, "No REG resource %s\n", id);
+		return ERR_PTR(-ENXIO);
+	}
+
+	return devm_ioremap(dev, res->start, resource_size(res));
+}
+
+static int meson_pcie_get_mems(struct platform_device *pdev,
+			       struct meson_pcie *mp)
+{
+	mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
+	if (IS_ERR(mp->mem_res.elbi_base))
+		return PTR_ERR(mp->mem_res.elbi_base);
+
+	mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
+	if (IS_ERR(mp->mem_res.cfg_base))
+		return PTR_ERR(mp->mem_res.cfg_base);
+
+	/* The two PCIe controllers in the Meson SoC share the same PHY registers */
+	mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
+	if (IS_ERR(mp->mem_res.phy_base))
+		return PTR_ERR(mp->mem_res.phy_base);
+
+	return 0;
+}
+
+static void meson_pcie_power_on(struct meson_pcie *mp)
+{
+	writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+}
+
+static void meson_pcie_reset(struct meson_pcie *mp)
+{
+	struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+	reset_control_assert(mrst->phy);
+	udelay(PCIE_RESET_DELAY);
+	reset_control_deassert(mrst->phy);
+	udelay(PCIE_RESET_DELAY);
+
+	reset_control_assert(mrst->port);
+	reset_control_assert(mrst->apb);
+	udelay(PCIE_RESET_DELAY);
+	reset_control_deassert(mrst->port);
+	reset_control_deassert(mrst->apb);
+	udelay(PCIE_RESET_DELAY);
+}
+
+static inline struct clk *meson_pcie_probe_clock(struct device *dev,
+						 const char *id, u64 rate)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = devm_clk_get(dev, id);
+	if (IS_ERR(clk))
+		return clk;
+
+	if (rate) {
+		ret = clk_set_rate(clk, rate);
+		if (ret) {
+			dev_err(dev, "set clk rate failed, ret = %d\n", ret);
+			return ERR_PTR(ret);
+		}
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "couldn't enable clk\n");
+		return ERR_PTR(ret);
+	}
+
+	devm_add_action_or_reset(dev,
+				 (void (*) (void *))clk_disable_unprepare,
+				 clk);
+
+	return clk;
+}
+
+static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+{
+	struct device *dev = mp->pci.dev;
+	struct meson_pcie_clk_res *res = &mp->clk_res;
+
+	res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
+	if (IS_ERR(res->port_clk))
+		return PTR_ERR(res->port_clk);
+
+	res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+	if (IS_ERR(res->mipi_gate))
+		return PTR_ERR(res->mipi_gate);
+
+	res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
+	if (IS_ERR(res->general_clk))
+		return PTR_ERR(res->general_clk);
+
+	res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
+	if (IS_ERR(res->clk))
+		return PTR_ERR(res->clk);
+
+	return 0;
+}
+
+static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+	writel(val, mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
+{
+	return readl(mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
+{
+	return readl(mp->mem_res.cfg_base + reg);
+}
+
+static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+	writel(val, mp->mem_res.cfg_base + reg);
+}
+
+static void meson_pcie_assert_reset(struct meson_pcie *mp)
+{
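+	/* Toggle the reset GPIO to give the downstream device a reset pulse */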
+	gpiod_set_value_cansleep(mp->reset_gpio, 0);
+	udelay(500);
+	gpiod_set_value_cansleep(mp->reset_gpio, 1);
+}
+
+static void meson_pcie_init_dw(struct meson_pcie *mp)
+{
+	u32 val;
+
+	val = meson_cfg_readl(mp, PCIE_CFG0);
+	val |= APP_LTSSM_ENABLE;
+	meson_cfg_writel(mp, val, PCIE_CFG0);
+
+	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+	val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
+	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+	val |= LINK_CAPABLE_X1;
+	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+	val &= ~NUM_OF_LANES_MASK;
+	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+	val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
+	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
+	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
+}
+
+static int meson_size_to_payload(struct meson_pcie *mp, int size)
+{
+	struct device *dev = mp->pci.dev;
+
+	/*
+	 * The DWC core encodes the max payload size as 2^(val + 7) bytes,
+	 * where val ranges from 0 to 5 and defaults to 1.  If the requested
+	 * size is not a power of two, or lies outside the 128..4096 byte
+	 * range, fall back to the default of 2^(1 + 7) = 256 bytes.
+	 * e.g. for size = 256: fls(256) - 8 = 1, and 2^(1 + 7) = 256.
+	 */
+	if (!is_power_of_2(size) || size < 128 || size > 4096) {
+		dev_warn(dev, "payload size %d, set to default 256\n", size);
+		return 1;
+	}
+
+	return fls(size) - 8;
+}
+
+static void meson_set_max_payload(struct meson_pcie *mp, int size)
+{
+	u32 val;
+	int max_payload_size = meson_size_to_payload(mp, size);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
+{
+	u32 val;
+	int max_rd_req_size = meson_size_to_payload(mp, size);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static inline void meson_enable_memory_space(struct meson_pcie *mp)
+{
+	/* Set the RC Bus Master, Memory Space and I/O Space enables */
+	meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
+			 PCIE_STATUS_COMMAND);
+}
+
+static int meson_pcie_establish_link(struct meson_pcie *mp)
+{
+	struct dw_pcie *pci = &mp->pci;
+	struct pcie_port *pp = &pci->pp;
+
+	meson_pcie_init_dw(mp);
+	meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+	meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
+
+	dw_pcie_setup_rc(pp);
+	meson_enable_memory_space(mp);
+
+	meson_pcie_assert_reset(mp);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
+{
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(&mp->pci.pp);
+}
+
+static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+				  u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
+
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
+
+	/*
+	 * There is a bug in the MESON AXG PCIe controller whereby software
+	 * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
+	 * the return value in the config accessors.
+	 */
+	if (where == PCI_CLASS_REVISION && size == 4)
+		*val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
+	else if (where == PCI_CLASS_DEVICE && size == 2)
+		*val = PCI_CLASS_BRIDGE_PCI;
+	else if (where == PCI_CLASS_DEVICE && size == 1)
+		*val = PCI_CLASS_BRIDGE_PCI & 0xff;
+	else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
+		*val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
+				  int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+static int meson_pcie_link_up(struct dw_pcie *pci)
+{
+	struct meson_pcie *mp = to_meson_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 speed_okay = 0;
+	u32 cnt = 0;
+	u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
+
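+	/* Poll link status for up to WAIT_LINKUP_TIMEOUT * 10us (~40ms) */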
+	do {
+		state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+		state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
+		smlh_up = IS_SMLH_LINK_UP(state12);
+		rdlh_up = IS_RDLH_LINK_UP(state12);
+		ltssm_up = IS_LTSSM_UP(state12);
+
+		if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
+			speed_okay = 1;
+
+		if (smlh_up)
+			dev_dbg(dev, "smlh_link_up is on\n");
+		if (rdlh_up)
+			dev_dbg(dev, "rdlh_link_up is on\n");
+		if (ltssm_up)
+			dev_dbg(dev, "ltssm_up is on\n");
+		if (speed_okay)
+			dev_dbg(dev, "speed_okay\n");
+
+		if (smlh_up && rdlh_up && ltssm_up && speed_okay)
+			return 1;
+
+		cnt++;
+
+		udelay(10);
+	} while (cnt < WAIT_LINKUP_TIMEOUT);
+
+	dev_err(dev, "error: wait linkup timeout\n");
+	return 0;
+}
+
+static int meson_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct meson_pcie *mp = to_meson_pcie(pci);
+	int ret;
+
+	ret = meson_pcie_establish_link(mp);
+	if (ret)
+		return ret;
+
+	meson_pcie_enable_interrupts(mp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops meson_pcie_host_ops = {
+	.rd_own_conf = meson_pcie_rd_own_conf,
+	.wr_own_conf = meson_pcie_wr_own_conf,
+	.host_init = meson_pcie_host_init,
+};
+
+static int meson_add_pcie_port(struct meson_pcie *mp,
+			       struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &mp->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "failed to get MSI IRQ\n");
+			return pp->msi_irq;
+		}
+	}
+
+	pp->ops = &meson_pcie_host_ops;
+	pci->dbi_base = mp->mem_res.elbi_base;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = meson_pcie_link_up,
+};
+
+static int meson_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct meson_pcie *mp;
+	int ret;
+
+	mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
+	if (!mp)
+		return -ENOMEM;
+
+	pci = &mp->pci;
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(mp->reset_gpio)) {
+		dev_err(dev, "get reset gpio failed\n");
+		return PTR_ERR(mp->reset_gpio);
+	}
+
+	ret = meson_pcie_get_resets(mp);
+	if (ret) {
+		dev_err(dev, "get reset resource failed, %d\n", ret);
+		return ret;
+	}
+
+	ret = meson_pcie_get_mems(pdev, mp);
+	if (ret) {
+		dev_err(dev, "get memory resource failed, %d\n", ret);
+		return ret;
+	}
+
+	meson_pcie_power_on(mp);
+	meson_pcie_reset(mp);
+
+	ret = meson_pcie_probe_clocks(mp);
+	if (ret) {
+		dev_err(dev, "init clock resources failed, %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, mp);
+
+	ret = meson_add_pcie_port(mp, pdev);
+	if (ret < 0) {
+		dev_err(dev, "Add PCIe port failed, %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id meson_pcie_of_match[] = {
+	{
+		.compatible = "amlogic,axg-pcie",
+	},
+	{},
+};
+
+static struct platform_driver meson_pcie_driver = {
+	.probe = meson_pcie_probe,
+	.driver = {
+		.name = "meson-pcie",
+		.of_match_table = meson_pcie_of_match,
+	},
+};
+
+builtin_platform_driver(meson_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-al.c b/marvell/linux/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 0000000..1eeda2f
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi  {
+	void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				     int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct al_pcie_acpi *pcie = cfg->priv;
+	void __iomem *dbi_base = pcie->dbi_base;
+
+	if (bus->number == cfg->busr.start) {
+		/*
+		 * The DW PCIe core doesn't filter out transactions to other
+		 * devices/functions on the root bus num, so we do this here.
+		 */
+		if (PCI_SLOT(devfn) > 0)
+			return NULL;
+		else
+			return dbi_base + where;
+	}
+
+	return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct al_pcie_acpi *al_pcie;
+	struct resource *res;
+	int ret;
+
+	al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+	if (!al_pcie)
+		return -ENOMEM;
+
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+	if (ret) {
+		dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+			root->segment);
+		return ret;
+	}
+
+	dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+	al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(al_pcie->dbi_base)) {
+		long err = PTR_ERR(al_pcie->dbi_base);
+
+		dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n",
+			res, err);
+		return err;
+	}
+
+	cfg->priv = al_pcie;
+
+	return 0;
+}
+
+struct pci_ecam_ops al_pcie_ops = {
+	.bus_shift    = 20,
+	.init         =  al_pcie_init,
+	.pci_ops      = {
+		.map_bus    = al_pcie_map_bus,
+		.read       = pci_generic_config_read,
+		.write      = pci_generic_config_write,
+	}
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_AL
+
+#include <linux/of_pci.h>
+#include "pcie-designware.h"
+
+#define AL_PCIE_REV_ID_2	2
+#define AL_PCIE_REV_ID_3	3
+#define AL_PCIE_REV_ID_4	4
+
+#define AXI_BASE_OFFSET		0x0
+
+#define DEVICE_ID_OFFSET	0x16c
+
+#define DEVICE_REV_ID			0x0
+#define DEVICE_REV_ID_DEV_ID_MASK	GENMASK(31, 16)
+
+#define DEVICE_REV_ID_DEV_ID_X4		0
+#define DEVICE_REV_ID_DEV_ID_X8		2
+#define DEVICE_REV_ID_DEV_ID_X16	4
+
+#define OB_CTRL_REV1_2_OFFSET	0x0040
+#define OB_CTRL_REV3_5_OFFSET	0x0030
+
+#define CFG_TARGET_BUS			0x0
+#define CFG_TARGET_BUS_MASK_MASK	GENMASK(7, 0)
+#define CFG_TARGET_BUS_BUSNUM_MASK	GENMASK(15, 8)
+
+#define CFG_CONTROL			0x4
+#define CFG_CONTROL_SUBBUS_MASK		GENMASK(15, 8)
+#define CFG_CONTROL_SEC_BUS_MASK	GENMASK(23, 16)
+
+struct al_pcie_reg_offsets {
+	unsigned int ob_ctrl;
+};
+
+struct al_pcie_target_bus_cfg {
+	u8 reg_val;
+	u8 reg_mask;
+	u8 ecam_mask;
+};
+
+struct al_pcie {
+	struct dw_pcie *pci;
+	void __iomem *controller_base; /* base of PCIe unit (not DW core) */
+	struct device *dev;
+	resource_size_t ecam_size;
+	unsigned int controller_rev_id;
+	struct al_pcie_reg_offsets reg_offsets;
+	struct al_pcie_target_bus_cfg target_bus_cfg;
+};
+
+#define PCIE_ECAM_DEVFN(x)		(((x) & 0xff) << 12)
+
+#define to_al_pcie(x)		dev_get_drvdata((x)->dev)
+
+static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
+{
+	return readl_relaxed(pcie->controller_base + offset);
+}
+
+static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
+					     u32 val)
+{
+	writel_relaxed(val, pcie->controller_base + offset);
+}
+
+static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
+{
+	u32 dev_rev_id_val;
+	u32 dev_id_val;
+
+	dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
+						  DEVICE_ID_OFFSET +
+						  DEVICE_REV_ID);
+	dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);
+
+	switch (dev_id_val) {
+	case DEVICE_REV_ID_DEV_ID_X4:
+		*rev_id = AL_PCIE_REV_ID_2;
+		break;
+	case DEVICE_REV_ID_DEV_ID_X8:
+		*rev_id = AL_PCIE_REV_ID_3;
+		break;
+	case DEVICE_REV_ID_DEV_ID_X16:
+		*rev_id = AL_PCIE_REV_ID_4;
+		break;
+	default:
+		dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
+			dev_id_val);
+		return -EINVAL;
+	}
+
+	dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);
+
+	return 0;
+}
+
+static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
+{
+	switch (pcie->controller_rev_id) {
+	case AL_PCIE_REV_ID_2:
+		pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
+		break;
+	case AL_PCIE_REV_ID_3:
+	case AL_PCIE_REV_ID_4:
+		pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
+		break;
+	default:
+		dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
+			pcie->controller_rev_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
+					  u8 target_bus,
+					  u8 mask_target_bus)
+{
+	u32 reg;
+
+	reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
+	      FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);
+
+	al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
+				  pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
+				  reg);
+}
+
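+/*
+ * The ECAM window may cover fewer than 256 buses.  Bus number bits that
+ * fit inside the window are encoded in the transaction address; the
+ * remaining high bits are programmed into the cfg_target_bus register,
+ * which is rewritten on the fly whenever an access targets a bus range
+ * outside the currently mapped one.
+ */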
+static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
+					   unsigned int busnr,
+					   unsigned int devfn)
+{
+	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
+	unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
+	unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
+	struct pcie_port *pp = &pcie->pci->pp;
+	void __iomem *pci_base_addr;
+
+	pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
+					 (busnr_ecam << 20) +
+					 PCIE_ECAM_DEVFN(devfn));
+
+	if (busnr_reg != target_bus_cfg->reg_val) {
+		dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
+			target_bus_cfg->reg_val, busnr_reg);
+		target_bus_cfg->reg_val = busnr_reg;
+		al_pcie_target_bus_set(pcie,
+				       target_bus_cfg->reg_val,
+				       target_bus_cfg->reg_mask);
+	}
+
+	return pci_base_addr;
+}
+
+static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct al_pcie *pcie = to_al_pcie(pci);
+	unsigned int busnr = bus->number;
+	void __iomem *pci_addr;
+	int rc;
+
+	pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
+
+	rc = dw_pcie_read(pci_addr + where, size, val);
+
+	dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
+		size, pci_domain_nr(bus), bus->number,
+		PCI_SLOT(devfn), PCI_FUNC(devfn), where,
+		(pci_addr + where), *val);
+
+	return rc;
+}
+
+static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct al_pcie *pcie = to_al_pcie(pci);
+	unsigned int busnr = bus->number;
+	void __iomem *pci_addr;
+	int rc;
+
+	pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
+
+	rc = dw_pcie_write(pci_addr + where, size, val);
+
+	dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
+		size, pci_domain_nr(bus), bus->number,
+		PCI_SLOT(devfn), PCI_FUNC(devfn), where,
+		(pci_addr + where), val);
+
+	return rc;
+}
+
+static void al_pcie_config_prepare(struct al_pcie *pcie)
+{
+	struct al_pcie_target_bus_cfg *target_bus_cfg;
+	struct pcie_port *pp = &pcie->pci->pp;
+	unsigned int ecam_bus_mask;
+	u32 cfg_control_offset;
+	u8 subordinate_bus;
+	u8 secondary_bus;
+	u32 cfg_control;
+	u32 reg;
+
+	target_bus_cfg = &pcie->target_bus_cfg;
+
+	ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
+	if (ecam_bus_mask > 255) {
+		dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
+		ecam_bus_mask = 255;
+	}
+
+	/* This portion is taken from the transaction address */
+	target_bus_cfg->ecam_mask = ecam_bus_mask;
+	/* This portion is taken from the cfg_target_bus reg */
+	target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
+	target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
+
+	al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
+			       target_bus_cfg->reg_mask);
+
+	secondary_bus = pp->busn->start + 1;
+	subordinate_bus = pp->busn->end;
+
+	/* Set the valid values of secondary and subordinate buses */
+	cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
+			     CFG_CONTROL;
+
+	cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);
+
+	reg = cfg_control &
+	      ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);
+
+	reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
+	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+
+	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+}
+
+static int al_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct al_pcie *pcie = to_al_pcie(pci);
+	int rc;
+
+	rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
+	if (rc)
+		return rc;
+
+	rc = al_pcie_reg_offsets_set(pcie);
+	if (rc)
+		return rc;
+
+	al_pcie_config_prepare(pcie);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops al_pcie_host_ops = {
+	.rd_other_conf = al_pcie_rd_other_conf,
+	.wr_other_conf = al_pcie_wr_other_conf,
+	.host_init = al_pcie_host_init,
+};
+
+static int al_add_pcie_port(struct pcie_port *pp,
+			    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &al_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
+static int al_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *controller_res;
+	struct resource *ecam_res;
+	struct resource *dbi_res;
+	struct al_pcie *al_pcie;
+	struct dw_pcie *pci;
+
+	al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+	if (!al_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	al_pcie->pci = pci;
+	al_pcie->dev = dev;
+
+	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res);
+		return PTR_ERR(pci->dbi_base);
+	}
+
+	ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	if (!ecam_res) {
+		dev_err(dev, "couldn't find 'config' reg in DT\n");
+		return -ENOENT;
+	}
+	al_pcie->ecam_size = resource_size(ecam_res);
+
+	controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      "controller");
+	al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
+	if (IS_ERR(al_pcie->controller_base)) {
+		dev_err(dev, "couldn't remap controller base %pR\n",
+			controller_res);
+		return PTR_ERR(al_pcie->controller_base);
+	}
+
+	dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
+		dbi_res, controller_res);
+
+	platform_set_drvdata(pdev, al_pcie);
+
+	return al_add_pcie_port(&pci->pp, pdev);
+}
+
+static const struct of_device_id al_pcie_of_match[] = {
+	{ .compatible = "amazon,al-alpine-v2-pcie",
+	},
+	{ .compatible = "amazon,al-alpine-v3-pcie",
+	},
+	{},
+};
+
+static struct platform_driver al_pcie_driver = {
+	.driver = {
+		.name	= "al-pcie",
+		.of_match_table = al_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = al_pcie_probe,
+};
+builtin_platform_driver(al_pcie_driver);
+
+#endif /* CONFIG_PCIE_AL */
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-armada8k.c b/marvell/linux/drivers/pci/controller/dwc/pcie-armada8k.c
new file mode 100644
index 0000000..4959654
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+
+#include "pcie-designware.h"
+
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
+struct armada8k_pcie {
+	struct dw_pcie *pci;
+	struct clk *clk;
+	struct clk *clk_reg;
+	struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+	unsigned int phy_count;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET		0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG		(PCIE_VENDOR_REGS_OFFSET + 0x0)
+#define PCIE_APP_LTSSM_EN		BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT		4
+#define PCIE_DEVICE_TYPE_MASK		0xF
+#define PCIE_DEVICE_TYPE_RC		0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG		(PCIE_VENDOR_REGS_OFFSET + 0x8)
+#define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x20)
+#define PCIE_INT_A_ASSERT_MASK		BIT(9)
+#define PCIE_INT_B_ASSERT_MASK		BIT(10)
+#define PCIE_INT_C_ASSERT_MASK		BIT(11)
+#define PCIE_INT_D_ASSERT_MASK		BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x60)
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE		0x3511
+#define AWCACHE_DEFAULT_VALUE		0x5311
+
+#define DOMAIN_OUTER_SHAREABLE		0x2
+#define AX_USER_DOMAIN_MASK		0x3
+#define AX_USER_DOMAIN_SHIFT		4
+
+#define to_armada8k_pcie(x)	dev_get_drvdata((x)->dev)
+
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+	int i;
+
+	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+		phy_power_off(pcie->phy[i]);
+		phy_exit(pcie->phy[i]);
+	}
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+		ret = phy_init(pcie->phy[i]);
+		if (ret)
+			return ret;
+
+		ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+				       pcie->phy_count);
+		if (ret) {
+			phy_exit(pcie->phy[i]);
+			return ret;
+		}
+
+		ret = phy_power_on(pcie->phy[i]);
+		if (ret) {
+			phy_exit(pcie->phy[i]);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *node = dev->of_node;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+		pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+		if (IS_ERR(pcie->phy[i])) {
+			if (PTR_ERR(pcie->phy[i]) != -ENODEV)
+				return PTR_ERR(pcie->phy[i]);
+
+			pcie->phy[i] = NULL;
+			continue;
+		}
+
+		pcie->phy_count++;
+	}
+
+	/* Old bindings miss the PHY handle, so just warn if there is no PHY */
+	if (!pcie->phy_count)
+		dev_warn(dev, "No available PHY\n");
+
+	ret = armada8k_pcie_enable_phys(pcie);
+	if (ret)
+		dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+	return ret;
+}
+
+static int armada8k_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
+
+	if ((reg & mask) == mask)
+		return 1;
+
+	dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+	return 0;
+}
+
+static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	u32 reg;
+
+	if (!dw_pcie_link_up(pci)) {
+		/* Disable LTSSM state machine to enable configuration */
+		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+		reg &= ~(PCIE_APP_LTSSM_EN);
+		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+	}
+
+	/* Set the device to root complex mode */
+	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+	/* Set the PCIe master AxCache attributes */
+	dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+	dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
+
+	/* Set the PCIe master AxDomain attributes */
+	reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
+	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+	dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);
+
+	reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
+	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+	dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);
+
+	/* Enable INT A-D interrupts */
+	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
+	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
+
+	if (!dw_pcie_link_up(pci)) {
+		/* Configuration done. Start LTSSM */
+		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+		reg |= PCIE_APP_LTSSM_EN;
+		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+	}
+
+	/* Wait until the link becomes active again */
+	if (dw_pcie_wait_for_link(pci))
+		dev_err(pci->dev, "Link not up after reconfiguration\n");
+}
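+
+/*
+ * The sequence above is a series of read-modify-write updates on DBI
+ * registers. A hedged sketch of a helper that condenses the repeated
+ * idiom (the example_*() names are hypothetical, not part of this
+ * driver):
+ */
+#if 0	/* illustrative sketch, not built */
+static void example_dbi_rmw(struct dw_pcie *pci, u32 reg, u32 clear, u32 set)
+{
+	u32 val = dw_pcie_readl_dbi(pci, reg);
+
+	val &= ~clear;
+	val |= set;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
+
+/* e.g. switching the port into root-complex mode would then read: */
+static void example_set_rc_mode(struct dw_pcie *pci)
+{
+	example_dbi_rmw(pci, PCIE_GLOBAL_CONTROL_REG,
+			PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT,
+			PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT);
+}
+#endif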
+
+static int armada8k_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
+
+	dw_pcie_setup_rc(pp);
+	armada8k_pcie_establish_link(pcie);
+
+	return 0;
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+	struct armada8k_pcie *pcie = arg;
+	struct dw_pcie *pci = pcie->pci;
+	u32 val;
+
+	/*
+	 * Interrupts are handled by the drivers of the PCI devices
+	 * themselves. However, they are also latched into the PCIe
+	 * controller's cause register, so clear them here by writing
+	 * the cause bits back (the register is write-1-to-clear).
+	 */
+	val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
+	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);
+
+	return IRQ_HANDLED;
+}
+
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+	.host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
+				  struct platform_device *pdev)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &armada8k_pcie_host_ops;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq for port\n");
+		return pp->irq;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+			       IRQF_SHARED, "armada8k-pcie", pcie);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = armada8k_pcie_link_up,
+};
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+	struct dw_pcie *pci;
+	struct armada8k_pcie *pcie;
+	struct device *dev = &pdev->dev;
+	struct resource *base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	pcie->pci = pci;
+
+	pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	ret = clk_prepare_enable(pcie->clk);
+	if (ret)
+		return ret;
+
+	pcie->clk_reg = devm_clk_get(dev, "reg");
+	if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+		ret = -EPROBE_DEFER;
+		goto fail;
+	}
+	if (!IS_ERR(pcie->clk_reg)) {
+		ret = clk_prepare_enable(pcie->clk_reg);
+		if (ret)
+			goto fail_clkreg;
+	}
+
+	/* Get the dw-pcie unit configuration/control registers base. */
+	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "couldn't remap regs base %pR\n", base);
+		ret = PTR_ERR(pci->dbi_base);
+		goto fail_clkreg;
+	}
+
+	ret = armada8k_pcie_setup_phys(pcie);
+	if (ret)
+		goto fail_clkreg;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = armada8k_add_pcie_port(pcie, pdev);
+	if (ret)
+		goto disable_phy;
+
+	return 0;
+
+disable_phy:
+	armada8k_pcie_disable_phys(pcie);
+fail_clkreg:
+	clk_disable_unprepare(pcie->clk_reg);
+fail:
+	clk_disable_unprepare(pcie->clk);
+
+	return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+	{ .compatible = "marvell,armada8k-pcie", },
+	{},
+};
+
+static struct platform_driver armada8k_pcie_driver = {
+	.probe		= armada8k_pcie_probe,
+	.driver = {
+		.name	= "armada8k-pcie",
+		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-artpec6.c b/marvell/linux/drivers/pci/controller/dwc/pcie-artpec6.c
new file mode 100644
index 0000000..d00252b
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x)	dev_get_drvdata((x)->dev)
+
+enum artpec_pcie_variants {
+	ARTPEC6,
+	ARTPEC7,
+};
+
+struct artpec6_pcie {
+	struct dw_pcie		*pci;
+	struct regmap		*regmap;	/* DT axis,syscon-pcie */
+	void __iomem		*phy_base;	/* DT phy */
+	enum artpec_pcie_variants variant;
+	enum dw_pcie_device_mode mode;
+};
+
+struct artpec_pcie_of_data {
+	enum artpec_pcie_variants variant;
+	enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id artpec6_pcie_of_match[];
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET			0x700
+
+#define ACK_F_ASPM_CTRL_OFF		(PL_OFFSET + 0xc)
+#define ACK_N_FTS_MASK			GENMASK(15, 8)
+#define ACK_N_FTS(x)			(((x) << 8) & ACK_N_FTS_MASK)
+
+#define FAST_TRAINING_SEQ_MASK		GENMASK(7, 0)
+#define FAST_TRAINING_SEQ(x)		(((x) << 0) & FAST_TRAINING_SEQ_MASK)
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG				0x18
+#define  PCIECFG_DBG_OEN		BIT(24)
+#define  PCIECFG_CORE_RESET_REQ		BIT(21)
+#define  PCIECFG_LTSSM_ENABLE		BIT(20)
+#define  PCIECFG_DEVICE_TYPE_MASK	GENMASK(19, 16)
+#define  PCIECFG_CLKREQ_B		BIT(11)
+#define  PCIECFG_REFCLK_ENABLE		BIT(10)
+#define  PCIECFG_PLL_ENABLE		BIT(9)
+#define  PCIECFG_PCLK_ENABLE		BIT(8)
+#define  PCIECFG_RISRCREN		BIT(4)
+#define  PCIECFG_MODE_TX_DRV_EN		BIT(3)
+#define  PCIECFG_CISRREN		BIT(2)
+#define  PCIECFG_MACRO_ENABLE		BIT(0)
+/* ARTPEC-7 specific fields */
+#define  PCIECFG_REFCLKSEL		BIT(23)
+#define  PCIECFG_NOC_RESET		BIT(3)
+
+#define PCIESTAT			0x1c
+/* ARTPEC-7 specific fields */
+#define  PCIESTAT_EXTREFCLK		BIT(3)
+
+#define NOCCFG				0x40
+#define  NOCCFG_ENABLE_CLK_PCIE		BIT(4)
+#define  NOCCFG_POWER_PCIE_IDLEACK	BIT(3)
+#define  NOCCFG_POWER_PCIE_IDLE		BIT(2)
+#define  NOCCFG_POWER_PCIE_IDLEREQ	BIT(1)
+
+#define PHY_STATUS			0x118
+#define  PHY_COSPLLLOCK			BIT(0)
+
+#define PHY_TX_ASIC_OUT			0x4040
+#define  PHY_TX_ASIC_OUT_TX_ACK		BIT(0)
+
+#define PHY_RX_ASIC_OUT			0x405c
+#define  PHY_RX_ASIC_OUT_ACK		BIT(0)
+
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
+{
+	u32 val;
+
+	regmap_read(artpec6_pcie->regmap, offset, &val);
+	return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset,
+				u32 val)
+{
+	regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_ep *ep = &pci->ep;
+
+	switch (artpec6_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		return pci_addr - pp->cfg0_base;
+	case DW_PCIE_EP_TYPE:
+		return pci_addr - ep->phys_base;
+	default:
+		dev_err(pci->dev, "UNKNOWN device type\n");
+	}
+	return pci_addr;
+}
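+
+/*
+ * A worked example with hypothetical addresses: in RC mode with
+ * pp->cfg0_base = 0xf8000000, a CPU address of 0xf8100000 is handed to
+ * the iATU as PCI address 0x00100000; the fixup strips the fixed
+ * offset this SoC's interconnect prepends before the controller sees
+ * the address.
+ */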
+
+static int artpec6_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |= PCIECFG_LTSSM_ENABLE;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+
+	return 0;
+}
+
+static void artpec6_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val &= ~PCIECFG_LTSSM_ENABLE;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
+	.start_link = artpec6_pcie_establish_link,
+	.stop_link = artpec6_pcie_stop_link,
+};
+
+static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	unsigned int retries;
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+		retries--;
+	} while (retries &&
+		(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+	if (!retries)
+		dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+		retries--;
+	} while (retries && !(val & PHY_COSPLLLOCK));
+	if (!retries)
+		dev_err(dev, "PHY PLL did not lock\n");
+}
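+
+/*
+ * A minimal sketch of the same two waits expressed with the kernel's
+ * polling helpers instead of open-coded loops; it assumes an extra
+ * #include <linux/iopoll.h>, and the 100 ms timeouts are assumptions
+ * roughly matching the 50 x 1-2 ms retries above.
+ */
+#if 0	/* illustrative sketch, not built */
+static void example_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+	struct device *dev = artpec6_pcie->pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regmap_read_poll_timeout(artpec6_pcie->regmap, NOCCFG, val,
+				       !(val & (NOCCFG_POWER_PCIE_IDLEACK |
+						NOCCFG_POWER_PCIE_IDLE)),
+				       1000, 100000);
+	if (ret)
+		dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+	ret = readl_poll_timeout(artpec6_pcie->phy_base + PHY_STATUS, val,
+				 val & PHY_COSPLLLOCK, 1000, 100000);
+	if (ret)
+		dev_err(dev, "PHY PLL did not lock\n");
+}
+#endif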
+
+static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	u16 phy_status_tx, phy_status_rx;
+	unsigned int retries;
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+		retries--;
+	} while (retries &&
+		(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+	if (!retries)
+		dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
+		phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
+		retries--;
+	} while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
+				(phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
+	if (!retries)
+		dev_err(dev, "PHY did not enter Pn state\n");
+}
+
+static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
+{
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
+		break;
+	case ARTPEC7:
+		artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
+		break;
+	}
+}
+
+static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |=  PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
+		PCIECFG_MODE_TX_DRV_EN |
+		PCIECFG_CISRREN |	/* Reference clock term. 100 Ohm */
+		PCIECFG_MACRO_ENABLE;
+	val |= PCIECFG_REFCLK_ENABLE;
+	val &= ~PCIECFG_DBG_OEN;
+	val &= ~PCIECFG_CLKREQ_B;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(5000, 6000);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val |= NOCCFG_ENABLE_CLK_PCIE;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+	usleep_range(20, 30);
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(6000, 7000);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	u32 val;
+	bool extrefclk;
+
+	/* Check if external reference clock is connected */
+	val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
+	extrefclk = !!(val & PCIESTAT_EXTREFCLK);
+	dev_dbg(pci->dev, "Using reference clock: %s\n",
+		extrefclk ? "external" : "internal");
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |=  PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
+		PCIECFG_PCLK_ENABLE;
+	if (extrefclk)
+		val |= PCIECFG_REFCLKSEL;
+	else
+		val &= ~PCIECFG_REFCLKSEL;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(10, 20);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val |= NOCCFG_ENABLE_CLK_PCIE;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+	usleep_range(20, 30);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
+{
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		artpec6_pcie_init_phy_a6(artpec6_pcie);
+		break;
+	case ARTPEC7:
+		artpec6_pcie_init_phy_a7(artpec6_pcie);
+		break;
+	}
+}
+
+static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	u32 val;
+
+	if (artpec6_pcie->variant != ARTPEC7)
+		return;
+
+	/*
+	 * Increase the N_FTS (Number of Fast Training Sequences)
+	 * to be transmitted when transitioning from L0s to L0.
+	 */
+	val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
+	val &= ~ACK_N_FTS_MASK;
+	val |= ACK_N_FTS(180);
+	dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
+
+	/*
+	 * Set the Number of Fast Training Sequences that the core
+	 * advertises as its N_FTS during Gen2 or Gen3 link training.
+	 */
+	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	val &= ~FAST_TRAINING_SEQ_MASK;
+	val |= FAST_TRAINING_SEQ(180);
+	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+}
+
+static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		val |= PCIECFG_CORE_RESET_REQ;
+		break;
+	case ARTPEC7:
+		val &= ~PCIECFG_NOC_RESET;
+		break;
+	}
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		val &= ~PCIECFG_CORE_RESET_REQ;
+		break;
+	case ARTPEC7:
+		val |= PCIECFG_NOC_RESET;
+		break;
+	}
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(100, 200);
+}
+
+static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+}
+
+static int artpec6_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+
+	artpec6_pcie_assert_core_reset(artpec6_pcie);
+	artpec6_pcie_init_phy(artpec6_pcie);
+	artpec6_pcie_deassert_core_reset(artpec6_pcie);
+	artpec6_pcie_wait_for_phy(artpec6_pcie);
+	artpec6_pcie_set_nfts(artpec6_pcie);
+	dw_pcie_setup_rc(pp);
+	artpec6_pcie_establish_link(pci);
+	dw_pcie_wait_for_link(pci);
+	artpec6_pcie_enable_interrupts(artpec6_pcie);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+	.host_init = artpec6_pcie_host_init,
+};
+
+static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "failed to get MSI irq\n");
+			return pp->msi_irq;
+		}
+	}
+
+	pp->ops = &artpec6_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	enum pci_barno bar;
+
+	artpec6_pcie_assert_core_reset(artpec6_pcie);
+	artpec6_pcie_init_phy(artpec6_pcie);
+	artpec6_pcie_deassert_core_reset(artpec6_pcie);
+	artpec6_pcie_wait_for_phy(artpec6_pcie);
+	artpec6_pcie_set_nfts(artpec6_pcie);
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				  enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+		return -EINVAL;
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = artpec6_pcie_ep_init,
+	.raise_irq = artpec6_pcie_raise_irq,
+};
+
+static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = artpec6_pcie->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int artpec6_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct artpec6_pcie *artpec6_pcie;
+	struct resource *dbi_base;
+	struct resource *phy_base;
+	int ret;
+	const struct of_device_id *match;
+	const struct artpec_pcie_of_data *data;
+	enum artpec_pcie_variants variant;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(artpec6_pcie_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct artpec_pcie_of_data *)match->data;
+	variant = (enum artpec_pcie_variants)data->variant;
+	mode = (enum dw_pcie_device_mode)data->mode;
+
+	artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
+	if (!artpec6_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	artpec6_pcie->pci = pci;
+	artpec6_pcie->variant = variant;
+	artpec6_pcie->mode = mode;
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+	artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
+	if (IS_ERR(artpec6_pcie->phy_base))
+		return PTR_ERR(artpec6_pcie->phy_base);
+
+	artpec6_pcie->regmap =
+		syscon_regmap_lookup_by_phandle(dev->of_node,
+						"axis,syscon-pcie");
+	if (IS_ERR(artpec6_pcie->regmap))
+		return PTR_ERR(artpec6_pcie->regmap);
+
+	platform_set_drvdata(pdev, artpec6_pcie);
+
+	switch (artpec6_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
+			return -ENODEV;
+
+		ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	case DW_PCIE_EP_TYPE: {
+		u32 val;
+
+		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
+			return -ENODEV;
+
+		val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+		val &= ~PCIECFG_DEVICE_TYPE_MASK;
+		artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+		ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+	default:
+		dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
+	}
+
+	return 0;
+}
+
+static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
+	.variant = ARTPEC6,
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
+	.variant = ARTPEC6,
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
+	.variant = ARTPEC7,
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
+	.variant = ARTPEC7,
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+	{
+		.compatible = "axis,artpec6-pcie",
+		.data = &artpec6_pcie_rc_of_data,
+	},
+	{
+		.compatible = "axis,artpec6-pcie-ep",
+		.data = &artpec6_pcie_ep_of_data,
+	},
+	{
+		.compatible = "axis,artpec7-pcie",
+		.data = &artpec7_pcie_rc_of_data,
+	},
+	{
+		.compatible = "axis,artpec7-pcie-ep",
+		.data = &artpec7_pcie_ep_of_data,
+	},
+	{},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+	.probe = artpec6_pcie_probe,
+	.driver = {
+		.name	= "artpec6-pcie",
+		.of_match_table = artpec6_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(artpec6_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware-ep.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-ep.c
new file mode 100644
index 0000000..d6d9667
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
+				   int flags)
+{
+	u32 reg;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writel_dbi2(pci, reg, 0x0);
+	dw_pcie_writel_dbi(pci, reg, 0x0);
+	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
+		dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+	}
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+	__dw_pcie_ep_reset_bar(pci, bar, 0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+				   struct pci_epf_header *hdr)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+			   hdr->subclass_code | hdr->baseclass_code << 8);
+	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+			   hdr->cache_line_size);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+			   hdr->subsys_vendor_id);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+			   hdr->interrupt_pin);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+				  dma_addr_t cpu_addr,
+				  enum dw_pcie_as_type as_type)
+{
+	int ret;
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
+	if (free_win >= ep->num_ib_windows) {
+		dev_err(pci->dev, "No free inbound window\n");
+		return -EINVAL;
+	}
+
+	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+				       as_type);
+	if (ret < 0) {
+		dev_err(pci->dev, "Failed to program IB window\n");
+		return ret;
+	}
+
+	ep->bar_to_atu[bar] = free_win;
+	set_bit(free_win, ep->ib_window_map);
+
+	return 0;
+}
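+
+/*
+ * The iATU window bookkeeping above is a plain bitmap allocator. A
+ * self-contained sketch of the idiom (the example_*() names are
+ * hypothetical):
+ */
+#if 0	/* illustrative sketch, not built */
+static int example_claim_window(unsigned long *map, u32 num_windows)
+{
+	u32 win = find_first_zero_bit(map, num_windows);
+
+	if (win >= num_windows)
+		return -EINVAL;		/* all windows in use */
+
+	set_bit(win, map);
+	return win;
+}
+
+static void example_release_window(unsigned long *map, u32 win)
+{
+	clear_bit(win, map);
+}
+#endif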
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+				   u64 pci_addr, size_t size)
+{
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
+	if (free_win >= ep->num_ob_windows) {
+		dev_err(pci->dev, "No free outbound window\n");
+		return -EINVAL;
+	}
+
+	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+				  phys_addr, pci_addr, size);
+
+	set_bit(free_win, ep->ob_window_map);
+	ep->outbound_addr[free_win] = phys_addr;
+
+	return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+				 struct pci_epf_bar *epf_bar)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar = epf_bar->barno;
+	u32 atu_index = ep->bar_to_atu[bar];
+
+	__dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+	clear_bit(atu_index, ep->ib_window_map);
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+			      struct pci_epf_bar *epf_bar)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar = epf_bar->barno;
+	size_t size = epf_bar->size;
+	int flags = epf_bar->flags;
+	enum dw_pcie_as_type as_type;
+	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+	if (!(flags & PCI_BASE_ADDRESS_SPACE))
+		as_type = DW_PCIE_AS_MEM;
+	else
+		as_type = DW_PCIE_AS_IO;
+
+	ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+	if (ret)
+		return ret;
+
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
+	dw_pcie_writel_dbi(pci, reg, flags);
+
+	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
+		dw_pcie_writel_dbi(pci, reg + 4, 0);
+	}
+
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
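+
+/*
+ * A worked example of the shadow (dbi2) writes above: for a 1 MiB
+ * 64-bit memory BAR, size - 1 = 0x000fffff lands in the BAR mask
+ * register, so the host's sizing probe reads back zeros in the low
+ * 20 address bits; upper_32_bits(size - 1) = 0 goes into the mask
+ * for the high dword.
+ */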
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+			      u32 *atu_index)
+{
+	u32 index;
+
+	for (index = 0; index < ep->num_ob_windows; index++) {
+		if (ep->outbound_addr[index] != addr)
+			continue;
+		*atu_index = index;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+				  phys_addr_t addr)
+{
+	int ret;
+	u32 atu_index;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_find_index(ep, addr, &atu_index);
+	if (ret < 0)
+		return;
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+	clear_bit(atu_index, ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
+			       phys_addr_t addr,
+			       u64 pci_addr, size_t size)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+	if (ret) {
+		dev_err(pci->dev, "Failed to enable address\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSI_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+	return val;
+}
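+
+/*
+ * The value returned above is the log2-encoded Multiple Message Enable
+ * field: e.g. a read of 3 means the host enabled 2^3 = 8 MSI vectors
+ * for this function.
+ */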
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSI_FLAGS_QMASK;
+	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, reg, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
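+
+/*
+ * "interrupts" is log2-encoded into the Multiple Message Capable field
+ * (bits 3:1 of the flags word): e.g. interrupts = 5 advertises support
+ * for 2^5 = 32 vectors.
+ */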
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
+
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSIX_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val &= PCI_MSIX_FLAGS_QSIZE;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
+
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSIX_FLAGS_QSIZE;
+	val |= interrupts;
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, reg, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
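+
+/*
+ * The MSI-X table size field is N-1 encoded; the EPC core is assumed
+ * to have subtracted one already, so "interrupts" is written verbatim
+ * into PCI_MSIX_FLAGS_QSIZE.
+ */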
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+				enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+	if (!ep->ops->raise_irq)
+		return -EINVAL;
+
+	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->stop_link)
+		return;
+
+	pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->start_link)
+		return -EINVAL;
+
+	return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_features*
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+	if (!ep->ops->get_features)
+		return NULL;
+
+	return ep->ops->get_features(ep);
+}
+
+static const struct pci_epc_ops epc_ops = {
+	.write_header		= dw_pcie_ep_write_header,
+	.set_bar		= dw_pcie_ep_set_bar,
+	.clear_bar		= dw_pcie_ep_clear_bar,
+	.map_addr		= dw_pcie_ep_map_addr,
+	.unmap_addr		= dw_pcie_ep_unmap_addr,
+	.set_msi		= dw_pcie_ep_set_msi,
+	.get_msi		= dw_pcie_ep_get_msi,
+	.set_msix		= dw_pcie_ep_set_msix,
+	.get_msix		= dw_pcie_ep_get_msix,
+	.raise_irq		= dw_pcie_ep_raise_irq,
+	.start			= dw_pcie_ep_start,
+	.stop			= dw_pcie_ep_stop,
+	.get_features		= dw_pcie_ep_get_features,
+};
+
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+
+	dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+	return -EINVAL;
+}
+
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u8 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+	unsigned int aligned_offset;
+	u16 msg_ctrl, msg_data;
+	u32 msg_addr_lower, msg_addr_upper, reg;
+	u64 msg_addr;
+	bool has_upper;
+	int ret;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
+	reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+	if (has_upper) {
+		reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+		msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+		reg = ep->msi_cap + PCI_MSI_DATA_64;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
+	} else {
+		msg_addr_upper = 0;
+		reg = ep->msi_cap + PCI_MSI_DATA_32;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
+	}
+	aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+	msg_addr = ((u64)msg_addr_upper) << 32 |
+			(msg_addr_lower & ~aligned_offset);
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+				  epc->mem->page_size);
+	if (ret)
+		return ret;
+
+	writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+
+	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+	return 0;
+}
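+
+/*
+ * A minimal sketch of the caller's side, assuming an endpoint function
+ * driver built on the pci_epf framework: the function driver does not
+ * call this helper directly but goes through the EPC core, which lands
+ * here via dw_pcie_ep_raise_irq(). example_signal_host() is a
+ * hypothetical name.
+ */
+#if 0	/* illustrative sketch, not built */
+static void example_signal_host(struct pci_epf *epf)
+{
+	/* raise MSI vector 1 on this function */
+	pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_MSI, 1);
+}
+#endif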
+
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+	u16 tbl_offset, bir;
+	u32 bar_addr_upper, bar_addr_lower;
+	u32 msg_addr_upper, msg_addr_lower;
+	u32 reg, msg_data, vec_ctrl;
+	u64 tbl_addr, msg_addr, reg_u64;
+	void __iomem *msix_tbl;
+	int ret;
+
+	reg = ep->msix_cap + PCI_MSIX_TABLE;
+	tbl_offset = dw_pcie_readl_dbi(pci, reg);
+	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+	tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+	bar_addr_upper = 0;
+	bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
+	reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+	if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
+		bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
+
+	tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
+	tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
+	tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+				   PCI_MSIX_ENTRY_SIZE);
+	if (!msix_tbl)
+		return -EINVAL;
+
+	msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
+	msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
+	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+	msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
+	vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	iounmap(msix_tbl);
+
+	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
+		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
+		return -EPERM;
+	}
+
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+				  epc->mem->page_size);
+	if (ret)
+		return ret;
+
+	writel(msg_data, ep->msi_mem);
+
+	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+	return 0;
+}
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+			      epc->mem->page_size);
+
+	pci_epc_mem_exit(epc);
+}
+
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+	u32 header;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	while (pos) {
+		header = dw_pcie_readl_dbi(pci, pos);
+		if (PCI_EXT_CAP_ID(header) == cap)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (!pos)
+			break;
+	}
+
+	return 0;
+}
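+
+/*
+ * This mirrors pci_find_ext_capability(), but walks the controller's
+ * own config space through the DBI window instead of issuing config
+ * transactions on the bus.
+ */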
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	int i;
+	int ret;
+	u32 reg;
+	void *addr;
+	u8 hdr_type;
+	unsigned int nbars;
+	unsigned int offset;
+	struct pci_epc *epc;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	if (!pci->dbi_base || !pci->dbi_base2) {
+		dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+	if (ret < 0) {
+		dev_err(dev, "Unable to read *num-ib-windows* property\n");
+		return ret;
+	}
+	if (ep->num_ib_windows > MAX_IATU_IN) {
+		dev_err(dev, "Invalid *num-ib-windows*\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+	if (ret < 0) {
+		dev_err(dev, "Unable to read *num-ob-windows* property\n");
+		return ret;
+	}
+	if (ep->num_ob_windows > MAX_IATU_OUT) {
+		dev_err(dev, "Invalid *num-ob-windows*\n");
+		return -EINVAL;
+	}
+
+	ep->ib_window_map = devm_kcalloc(dev,
+					 BITS_TO_LONGS(ep->num_ib_windows),
+					 sizeof(long),
+					 GFP_KERNEL);
+	if (!ep->ib_window_map)
+		return -ENOMEM;
+
+	ep->ob_window_map = devm_kcalloc(dev,
+					 BITS_TO_LONGS(ep->num_ob_windows),
+					 sizeof(long),
+					 GFP_KERNEL);
+	if (!ep->ob_window_map)
+		return -ENOMEM;
+
+	addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
+			    GFP_KERNEL);
+	if (!addr)
+		return -ENOMEM;
+	ep->outbound_addr = addr;
+
+	epc = devm_pci_epc_create(dev, &epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "Failed to create epc device\n");
+		return PTR_ERR(epc);
+	}
+
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+
+	if (ep->ops->ep_init)
+		ep->ops->ep_init(ep);
+
+	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+		dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+			hdr_type);
+		return -EIO;
+	}
+
+	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+	if (ret < 0)
+		epc->max_functions = 1;
+
+	ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+				 ep->page_size);
+	if (ret < 0) {
+		dev_err(dev, "Failed to initialize address space\n");
+		return ret;
+	}
+
+	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
+					     epc->mem->page_size);
+	if (!ep->msi_mem) {
+		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
+		return -ENOMEM;
+	}
+	ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+	ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+
+	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+	if (offset) {
+		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+			PCI_REBAR_CTRL_NBAR_SHIFT;
+
+		dw_pcie_dbi_ro_wr_en(pci);
+		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+		dw_pcie_dbi_ro_wr_dis(pci);
+	}
+
+	dw_pcie_setup(pci);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware-host.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-host.c
new file mode 100644
index 0000000..43106d9
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+static struct pci_ops dw_pcie_ops;
+
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+			       u32 *val)
+{
+	struct dw_pcie *pci;
+
+	if (pp->ops->rd_own_conf)
+		return pp->ops->rd_own_conf(pp, where, size, val);
+
+	pci = to_dw_pcie_from_pp(pp);
+	return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+			       u32 val)
+{
+	struct dw_pcie *pci;
+
+	if (pp->ops->wr_own_conf)
+		return pp->ops->wr_own_conf(pp, where, size, val);
+
+	pci = to_dw_pcie_from_pp(pp);
+	return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+#ifdef CONFIG_PCI_MSI
+static void dw_msi_ack_irq(struct irq_data *d)
+{
+	irq_chip_ack_parent(d);
+}
+
+static void dw_msi_mask_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void dw_msi_unmask_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip dw_pcie_msi_irq_chip = {
+	.name = "PCI-MSI",
+	.irq_ack = dw_msi_ack_irq,
+	.irq_mask = dw_msi_mask_irq,
+	.irq_unmask = dw_msi_unmask_irq,
+};
+
+static struct msi_domain_info dw_pcie_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+	.chip	= &dw_pcie_msi_irq_chip,
+};
+#endif
+/* MSI interrupt handler */
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+{
+	int i, pos, irq;
+	unsigned long val;
+	u32 status, num_ctrls;
+	irqreturn_t ret = IRQ_NONE;
+#ifdef CONFIG_PCIE_ASR1901
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	pm_wakeup_event(pci->dev, 2000);
+#endif
+
+	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+	for (i = 0; i < num_ctrls; i++) {
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+					(i * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, &status);
+		if (!status)
+			continue;
+
+		ret = IRQ_HANDLED;
+		val = status;
+		pos = 0;
+		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
+					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
+			irq = irq_find_mapping(pp->irq_domain,
+					       (i * MAX_MSI_IRQS_PER_CTRL) +
+					       pos);
+			generic_handle_irq(irq);
+			pos++;
+		}
+	}
+
+	return ret;
+}
+
+/* Chained MSI interrupt service routine */
+static void dw_chained_msi_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct pcie_port *pp;
+
+	chained_irq_enter(chip, desc);
+
+	pp = irq_desc_get_handler_data(desc);
+	dw_handle_msi_irq(pp);
+
+	chained_irq_exit(chip, desc);
+}
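+
+/*
+ * Chained-handler pattern: the parent interrupt stays owned by the
+ * irqchip core rather than a requested handler, and
+ * chained_irq_enter()/chained_irq_exit() bracket the demultiplexing so
+ * the parent controller sees the proper ack/eoi sequence.
+ */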
+
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	u64 msi_target;
+
+	msi_target = (u64)pp->msi_data;
+
+	msg->address_lo = lower_32_bits(msi_target);
+	msg->address_hi = upper_32_bits(msi_target);
+
+	msg->data = d->hwirq;
+
+	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)d->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int dw_pci_msi_set_affinity(struct irq_data *d,
+				   const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static void dw_pci_bottom_mask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	pp->irq_mask[ctrl] |= BIT(bit);
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+			    pp->irq_mask[ctrl]);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_unmask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	pp->irq_mask[ctrl] &= ~BIT(bit);
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+			    pp->irq_mask[ctrl]);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_ack(struct irq_data *d)
+{
+	struct pcie_port *pp  = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
+}
+
+static struct irq_chip dw_pci_msi_bottom_irq_chip = {
+	.name = "DWPCI-MSI",
+	.irq_ack = dw_pci_bottom_ack,
+	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
+	.irq_set_affinity = dw_pci_msi_set_affinity,
+	.irq_mask = dw_pci_bottom_mask,
+	.irq_unmask = dw_pci_bottom_unmask,
+};
+
+static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs,
+				    void *args)
+{
+	struct pcie_port *pp = domain->host_data;
+	unsigned long flags;
+	u32 i;
+	int bit;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
+				      order_base_2(nr_irqs));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+
+	if (bit < 0)
+		return -ENOSPC;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_info(domain, virq + i, bit + i,
+				    pp->msi_irq_chip,
+				    pp, handle_edge_irq,
+				    NULL, NULL);
+
+	return 0;
+}
+
+static void dw_pcie_irq_domain_free(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
+			      order_base_2(nr_irqs));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
+	.alloc	= dw_pcie_irq_domain_alloc,
+	.free	= dw_pcie_irq_domain_free,
+};
+
+int dw_pcie_allocate_domains(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+
+	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
+					       &dw_pcie_msi_domain_ops, pp);
+	if (!pp->irq_domain) {
+		dev_err(pci->dev, "Failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+#ifdef CONFIG_PCI_MSI
+	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
+						   &dw_pcie_msi_domain_info,
+						   pp->irq_domain);
+	if (!pp->msi_domain) {
+		dev_err(pci->dev, "Failed to create MSI domain\n");
+		irq_domain_remove(pp->irq_domain);
+		return -ENOMEM;
+	}
+#endif
+	return 0;
+}
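+
+/*
+ * Two stacked domains, following the generic MSI layering: the linear
+ * domain hands out the controller's hwirqs and is driven by
+ * dw_pci_msi_bottom_irq_chip, while the PCI/MSI domain on top is what
+ * a device's pci_alloc_irq_vectors() call ultimately allocates from.
+ */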
+
+void dw_pcie_free_msi(struct pcie_port *pp)
+{
+	if (pp->msi_irq) {
+		irq_set_chained_handler(pp->msi_irq, NULL);
+		irq_set_handler_data(pp->msi_irq, NULL);
+	}
+
+	irq_domain_remove(pp->msi_domain);
+	irq_domain_remove(pp->irq_domain);
+
+	if (pp->msi_page)
+		__free_page(pp->msi_page);
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	u64 msi_target;
+
+	pp->msi_page = alloc_page(GFP_KERNEL);
+	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
+				    DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, pp->msi_data)) {
+		dev_err(dev, "Failed to map MSI data\n");
+		__free_page(pp->msi_page);
+		pp->msi_page = NULL;
+		return;
+	}
+	msi_target = (u64)pp->msi_data;
+
+	/* Program the msi_data */
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+			    lower_32_bits(msi_target));
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
+			    upper_32_bits(msi_target));
+}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
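+
+/*
+ * This relies on the controller's MSI termination ("MSI catcher")
+ * logic: once PCIE_MSI_ADDR_LO/HI is programmed, an inbound memory
+ * write that matches the address is terminated inside the controller
+ * and sets a bit in the corresponding PCIE_MSI_INTR0_STATUS register
+ * instead of reaching DRAM, which is why a throwaway page is mapped
+ * here rather than real buffer memory.
+ */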
+
+int dw_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource_entry *win, *tmp;
+	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
+	struct resource *cfg_res;
+	u32 hdr_type;
+	int ret;
+
+	raw_spin_lock_init(&pci->pp.lock);
+
+	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	if (cfg_res) {
+		pp->cfg0_size = resource_size(cfg_res) >> 1;
+		pp->cfg1_size = resource_size(cfg_res) >> 1;
+		pp->cfg0_base = cfg_res->start;
+		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
+	} else if (!pp->va_cfg0_base) {
+		dev_err(dev, "Missing *config* reg space\n");
+	}
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;
+
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+					&bridge->windows, &pp->io_base);
+	if (ret)
+		return ret;
+
+	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+	if (ret)
+		return ret;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			ret = devm_pci_remap_iospace(dev, win->res,
+						     pp->io_base);
+			if (ret) {
+				dev_warn(dev, "Error %d: failed to map resource %pR\n",
+					 ret, win->res);
+				resource_list_destroy_entry(win);
+			} else {
+				pp->io = win->res;
+				pp->io->name = "I/O";
+				pp->io_size = resource_size(pp->io);
+				pp->io_bus_addr = pp->io->start - win->offset;
+			}
+			break;
+		case IORESOURCE_MEM:
+			pp->mem = win->res;
+			pp->mem->name = "MEM";
+			pp->mem_size = resource_size(pp->mem);
+			pp->mem_bus_addr = pp->mem->start - win->offset;
+			break;
+		case 0:
+			pp->cfg = win->res;
+			pp->cfg0_size = resource_size(pp->cfg) >> 1;
+			pp->cfg1_size = resource_size(pp->cfg) >> 1;
+			pp->cfg0_base = pp->cfg->start;
+			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
+			break;
+		case IORESOURCE_BUS:
+			pp->busn = win->res;
+			break;
+		}
+	}
+
+	if (!pci->dbi_base) {
+		pci->dbi_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg->start,
+						resource_size(pp->cfg));
+		if (!pci->dbi_base) {
+			dev_err(dev, "Error with ioremap\n");
+			return -ENOMEM;
+		}
+	}
+
+	pp->mem_base = pp->mem->start;
+
+	if (!pp->va_cfg0_base) {
+		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+					pp->cfg0_base, pp->cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(dev, "Error with ioremap of cfg0 space\n");
+			return -ENOMEM;
+		}
+	}
+
+	if (!pp->va_cfg1_base) {
+		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg1_base,
+						pp->cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(dev, "Error with ioremap\n");
+			return -ENOMEM;
+		}
+	}
+
+	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
+	if (ret)
+		pci->num_viewport = 2;
+
+	if (pci_msi_enabled()) {
+		/*
+		 * If a specific SoC driver needs to change the
+		 * default number of vectors, it needs to implement
+		 * the set_num_vectors callback.
+		 */
+		if (!pp->ops->set_num_vectors) {
+			pp->num_vectors = MSI_DEF_NUM_VECTORS;
+		} else {
+			pp->ops->set_num_vectors(pp);
+
+			if (pp->num_vectors > MAX_MSI_IRQS ||
+			    pp->num_vectors == 0) {
+				dev_err(dev,
+					"Invalid number of vectors\n");
+				return -EINVAL;
+			}
+		}
+
+		if (!pp->ops->msi_host_init) {
+			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+			ret = dw_pcie_allocate_domains(pp);
+			if (ret)
+				return ret;
+
+			if (pp->msi_irq)
+				irq_set_chained_handler_and_data(pp->msi_irq,
+							    dw_chained_msi_isr,
+							    pp);
+		} else {
+			ret = pp->ops->msi_host_init(pp);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	if (pp->ops->host_init) {
+		ret = pp->ops->host_init(pp);
+		if (ret)
+			goto err_free_msi;
+	}
+
+	ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
+	if (ret != PCIBIOS_SUCCESSFUL) {
+		dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
+			ret);
+		ret = pcibios_err_to_errno(ret);
+		goto err_free_msi;
+	}
+	if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+		dev_err(pci->dev,
+			"PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
+			hdr_type);
+		ret = -EIO;
+		goto err_free_msi;
+	}
+
+	pp->root_bus_nr = pp->busn->start;
+
+	bridge->dev.parent = dev;
+	bridge->sysdata = pp;
+	bridge->busnr = pp->root_bus_nr;
+	bridge->ops = &dw_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret)
+		goto err_free_msi;
+
+	pp->root_bus = bridge->bus;
+
+	if (pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
+
+	pci_bus_size_bridges(pp->root_bus);
+	pci_bus_assign_resources(pp->root_bus);
+
+	list_for_each_entry(child, &pp->root_bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(pp->root_bus);
+	return 0;
+
+err_free_msi:
+	if (pci_msi_enabled() && !pp->ops->msi_host_init)
+		dw_pcie_free_msi(pp);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
+
+void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+	pci_stop_root_bus(pp->root_bus);
+	pci_remove_root_bus(pp->root_bus);
+	if (pci_msi_enabled() && !pp->ops->msi_host_init)
+		dw_pcie_free_msi(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
+
+static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				     u32 devfn, int where, int size, u32 *val,
+				     bool write)
+{
+	int ret, type;
+	u32 busdev, cfg_size;
+	u64 cpu_addr;
+	void __iomem *va_cfg_base;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+	if (bus->parent->number == pp->root_bus_nr) {
+		type = PCIE_ATU_TYPE_CFG0;
+		cpu_addr = pp->cfg0_base;
+		cfg_size = pp->cfg0_size;
+		va_cfg_base = pp->va_cfg0_base;
+	} else {
+		type = PCIE_ATU_TYPE_CFG1;
+		cpu_addr = pp->cfg1_base;
+		cfg_size = pp->cfg1_size;
+		va_cfg_base = pp->va_cfg1_base;
+	}
+
+	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+				  type, cpu_addr,
+				  busdev, cfg_size);
+	if (write)
+		ret = dw_pcie_write(va_cfg_base + where, size, *val);
+	else
+		ret = dw_pcie_read(va_cfg_base + where, size, val);
+
+	if (pci->num_viewport <= 2)
+		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+					  PCIE_ATU_TYPE_IO, pp->io_base,
+					  pp->io_bus_addr, pp->io_size);
+
+	return ret;
+}
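+
+/*
+ * CFG0 TLPs address a device on the bus directly below the root port,
+ * while CFG1 TLPs are forwarded further downstream; with only two
+ * viewports the config access above shares iATU index 1 with the I/O
+ * window, so the I/O mapping has to be restored after every access.
+ */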
+
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 *val)
+{
+	if (pp->ops->rd_other_conf)
+		return pp->ops->rd_other_conf(pp, bus, devfn, where,
+					      size, val);
+
+	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
+					 false);
+}
+
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 val)
+{
+	if (pp->ops->wr_other_conf)
+		return pp->ops->wr_other_conf(pp, bus, devfn, where,
+					      size, val);
+
+	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
+					 true);
+}
+
+static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
+				int dev)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/* If there is no link, then there is no device */
+	if (bus->number != pp->root_bus_nr) {
+		if (!dw_pcie_link_up(pci))
+			return 0;
+	}
+
+	/* Access only one slot on each root port */
+	if (bus->number == pp->root_bus_nr && dev > 0)
+		return 0;
+
+	return 1;
+}
+
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			   int size, u32 *val)
+{
+	struct pcie_port *pp = bus->sysdata;
+
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	if (bus->number == pp->root_bus_nr)
+		return dw_pcie_rd_own_conf(pp, where, size, val);
+
+	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
+}
+
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			   int where, int size, u32 val)
+{
+	struct pcie_port *pp = bus->sysdata;
+
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (bus->number == pp->root_bus_nr)
+		return dw_pcie_wr_own_conf(pp, where, size, val);
+
+	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
+}
+
+static struct pci_ops dw_pcie_ops = {
+	.read = dw_pcie_rd_conf,
+	.write = dw_pcie_wr_conf,
+};
+
+void dw_pcie_setup_rc(struct pcie_port *pp)
+{
+	u32 val, ctrl, num_ctrls;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/*
+	 * Enable DBI read-only registers for writing/updating configuration.
+	 * Write permission gets disabled towards the end of this function.
+	 */
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	dw_pcie_setup(pci);
+
+	if (!pp->ops->msi_host_init) {
+		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+		/* Initialize IRQ Status array */
+		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+			pp->irq_mask[ctrl] = ~0;
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+					    4, pp->irq_mask[ctrl]);
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+					    4, ~0);
+		}
+	}
+
+	/* Setup RC BARs */
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
+
+	/* Setup interrupt pins */
+	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
+	val &= 0xffff00ff;
+	val |= 0x00000100;
+	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
+
+	/* Setup bus numbers */
+	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
+	val &= 0xff000000;
+	val |= 0x00ff0100;
+	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
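+	/* i.e. primary bus 0, secondary bus 1, subordinate bus 0xff */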
+
+	/* Setup command register */
+	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+	val &= 0xffff0000;
+	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+	/*
+	 * If the platform provides ->rd_other_conf, it means the platform
+	 * uses its own address translation component rather than ATU, so
+	 * we should not program the ATU here.
+	 */
+	if (!pp->ops->rd_other_conf) {
+		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
+					  PCIE_ATU_TYPE_MEM, pp->mem_base,
+					  pp->mem_bus_addr, pp->mem_size);
+		if (pci->num_viewport > 2)
+			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
+						  PCIE_ATU_TYPE_IO, pp->io_base,
+						  pp->io_bus_addr, pp->io_size);
+	}
+
+	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+	/* Program correct class for RC */
+	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+	val |= PORT_LOGIC_SPEED_CHANGE;
+	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware-plat.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-plat.c
new file mode 100644
index 0000000..b58fdcb
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+struct dw_plat_pcie {
+	struct dw_pcie			*pci;
+	struct regmap			*regmap;
+	enum dw_pcie_device_mode	mode;
+};
+
+struct dw_plat_pcie_of_data {
+	enum dw_pcie_device_mode	mode;
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[];
+
+static int dw_plat_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	dw_pcie_setup_rc(pp);
+	dw_pcie_wait_for_link(pci);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static void dw_plat_set_num_vectors(struct pcie_port *pp)
+{
+	pp->num_vectors = MAX_MSI_IRQS;
+}
+
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+	.host_init = dw_plat_pcie_host_init,
+	.set_num_vectors = dw_plat_set_num_vectors,
+};
+
+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
+{
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.start_link = dw_plat_pcie_establish_link,
+};
+
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				     enum pci_epc_irq_type type,
+				     u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	case PCI_EPC_IRQ_MSIX:
+		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static const struct pci_epc_features dw_plat_pcie_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = true,
+};
+
+static const struct pci_epc_features*
+dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
+{
+	return &dw_plat_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dw_plat_pcie_ep_init,
+	.raise_irq = dw_plat_pcie_ep_raise_irq,
+	.get_features = dw_plat_pcie_get_features,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = dw_plat_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 1);
+	if (pp->irq < 0)
+		return pp->irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0)
+			return pp->msi_irq;
+	}
+
+	pp->ops = &dw_plat_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dw_plat_pcie->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "Failed to initialize endpoint\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int dw_plat_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_plat_pcie *dw_plat_pcie;
+	struct dw_pcie *pci;
+	struct resource *res;  /* Resource from DT */
+	int ret;
+	const struct of_device_id *match;
+	const struct dw_plat_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(dw_plat_pcie_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dw_plat_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
+
+	dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
+	if (!dw_plat_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	dw_plat_pcie->pci = pci;
+	dw_plat_pcie->mode = mode;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	if (!res)
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	platform_set_drvdata(pdev, dw_plat_pcie);
+
+	switch (dw_plat_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+			return -ENODEV;
+
+		ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+			return -ENODEV;
+
+		ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+	}
+
+	return 0;
+}
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[] = {
+	{
+		.compatible = "snps,dw-pcie",
+		.data = &dw_plat_pcie_rc_of_data,
+	},
+	{
+		.compatible = "snps,dw-pcie-ep",
+		.data = &dw_plat_pcie_ep_of_data,
+	},
+	{},
+};
+
+static struct platform_driver dw_plat_pcie_driver = {
+	.driver = {
+		.name	= "dw-pcie",
+		.of_match_table = dw_plat_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = dw_plat_pcie_probe,
+};
+builtin_platform_driver(dw_plat_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.c
new file mode 100644
index 0000000..820488d
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/*
+ * These interfaces resemble the pci_find_*capability() interfaces, but these
+ * are for configuring host controllers, which are bridges *to* PCI devices but
+ * are not PCI devices themselves.
+ */
+static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+				  u8 cap)
+{
+	u8 cap_id, next_cap_ptr;
+	u16 reg;
+
+	if (!cap_ptr)
+		return 0;
+
+	reg = dw_pcie_readw_dbi(pci, cap_ptr);
+	cap_id = (reg & 0x00ff);
+
+	if (cap_id > PCI_CAP_ID_MAX)
+		return 0;
+
+	if (cap_id == cap)
+		return cap_ptr;
+
+	next_cap_ptr = (reg & 0xff00) >> 8;
+	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+{
+	u8 next_cap_ptr;
+	u16 reg;
+
+	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+	next_cap_ptr = (reg & 0x00ff);
+
+	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
+
+static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
+					    u8 cap)
+{
+	u32 header;
+	int ttl;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	/* minimum 8 bytes per capability */
+	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+	if (start)
+		pos = start;
+
+	header = dw_pcie_readl_dbi(pci, pos);
+	/*
+	 * If we have no capabilities, this is indicated by cap ID,
+	 * cap version and next pointer all being 0.
+	 */
+	if (header == 0)
+		return 0;
+
+	while (ttl-- > 0) {
+		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (pos < PCI_CFG_SPACE_SIZE)
+			break;
+
+		header = dw_pcie_readl_dbi(pci, pos);
+	}
+
+	return 0;
+}
+
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+{
+	return dw_pcie_find_next_ext_capability(pci, 0, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+	if (!IS_ALIGNED((uintptr_t)addr, size)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	if (size == 4) {
+		*val = readl(addr);
+	} else if (size == 2) {
+		*val = readw(addr);
+	} else if (size == 1) {
+		*val = readb(addr);
+	} else {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
+
+int dw_pcie_write(void __iomem *addr, int size, u32 val)
+{
+	if (!IS_ALIGNED((uintptr_t)addr, size))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (size == 4)
+		writel(val, addr);
+	else if (size == 2)
+		writew(val, addr);
+	else if (size == 1)
+		writeb(val, addr);
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
+{
+	int ret;
+	u32 val;
+
+	if (pci->ops->read_dbi)
+		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
+
+	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "Read DBI address failed\n");
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
+
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+	int ret;
+
+	if (pci->ops->write_dbi) {
+		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "Write DBI address failed\n");
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
+
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
+{
+	int ret;
+	u32 val;
+
+	if (pci->ops->read_dbi2)
+		return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
+
+	ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "read DBI address failed\n");
+
+	return val;
+}
+
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+	int ret;
+
+	if (pci->ops->write_dbi2) {
+		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "write DBI address failed\n");
+}
+
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+{
+	int ret;
+	u32 val;
+
+	if (pci->ops->read_dbi)
+		return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+
+	ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "Read ATU address failed\n");
+
+	return val;
+}
+
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+	int ret;
+
+	if (pci->ops->write_dbi) {
+		pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(pci->atu_base + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "Write ATU address failed\n");
+}
+
+static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+	return dw_pcie_readl_atu(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+				     u32 val)
+{
+	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+	dw_pcie_writel_atu(pci, offset + reg, val);
+}
+
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
+					     int type, u64 cpu_addr,
+					     u64 pci_addr, u32 size)
+{
+	u32 retries, val;
+
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+				 lower_32_bits(cpu_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+				 upper_32_bits(cpu_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
+				 lower_32_bits(cpu_addr + size - 1));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+				 lower_32_bits(pci_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+				 upper_32_bits(pci_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+				 type);
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+				 PCIE_ATU_ENABLE);
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_ob_unroll(pci, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+}
+
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+			       u64 cpu_addr, u64 pci_addr, u32 size)
+{
+	u32 retries, val;
+
+	if (pci->ops->cpu_addr_fixup)
+		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+
+	if (pci->iatu_unroll_enabled) {
+		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
+						 pci_addr, size);
+		return;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
+			   PCIE_ATU_REGION_OUTBOUND | index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
+			   lower_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
+			   upper_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
+			   lower_32_bits(cpu_addr + size - 1));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
+			   lower_32_bits(pci_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
+			   upper_32_bits(pci_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+		if (val & PCIE_ATU_ENABLE)
+			return;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+}
+
+static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+	return dw_pcie_readl_atu(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+				     u32 val)
+{
+	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+	dw_pcie_writel_atu(pci, offset + reg, val);
+}
+
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
+					   int bar, u64 cpu_addr,
+					   enum dw_pcie_as_type as_type)
+{
+	int type;
+	u32 retries, val;
+
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+				 lower_32_bits(cpu_addr));
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+				 upper_32_bits(cpu_addr));
+
+	switch (as_type) {
+	case DW_PCIE_AS_MEM:
+		type = PCIE_ATU_TYPE_MEM;
+		break;
+	case DW_PCIE_AS_IO:
+		type = PCIE_ATU_TYPE_IO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
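+	/*
+	 * BAR-match mode: inbound transactions hitting the given BAR are
+	 * translated to cpu_addr.
+	 */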
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+				 PCIE_ATU_ENABLE |
+				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_ib_unroll(pci, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+	return -EBUSY;
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+			     u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+	int type;
+	u32 retries, val;
+
+	if (pci->iatu_unroll_enabled)
+		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+						       cpu_addr, as_type);
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
+			   index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
+
+	switch (as_type) {
+	case DW_PCIE_AS_MEM:
+		type = PCIE_ATU_TYPE_MEM;
+		break;
+	case DW_PCIE_AS_IO:
+		type = PCIE_ATU_TYPE_IO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
+			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+	return -EBUSY;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+			 enum dw_pcie_region_type type)
+{
+	int region;
+
+	switch (type) {
+	case DW_PCIE_REGION_INBOUND:
+		region = PCIE_ATU_REGION_INBOUND;
+		break;
+	case DW_PCIE_REGION_OUTBOUND:
+		region = PCIE_ATU_REGION_OUTBOUND;
+		break;
+	default:
+		return;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
+}
+
+int dw_pcie_wait_for_link(struct dw_pcie *pci)
+{
+	int retries;
+
+	/* Check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (dw_pcie_link_up(pci)) {
+			dev_info(pci->dev, "Link up\n");
+			return 0;
+		}
+		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+	}
+
+	dev_info(pci->dev, "Phy link never came up\n");
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
+
+int dw_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 val;
+
+	if (pci->ops->link_up)
+		return pci->ops->link_up(pci);
+
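+	/* Up only once DEBUG1 reports link-up and training has completed */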
+	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+
+static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+{
+	u32 val;
+
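+	/*
+	 * The viewport register does not exist on unrolled-iATU cores,
+	 * so a read returns all 1s there.
+	 */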
+	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+	if (val == 0xffffffff)
+		return 1;
+
+	return 0;
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+	int ret;
+	u32 val;
+	u32 lanes;
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	if (pci->version >= 0x480A || (!pci->version &&
+				       dw_pcie_iatu_unroll_enabled(pci))) {
+		pci->iatu_unroll_enabled = true;
+		if (!pci->atu_base)
+			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+	}
+	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+		"enabled" : "disabled");
+
+	ret = of_property_read_u32(np, "num-lanes", &lanes);
+	if (ret) {
+		dev_dbg(pci->dev, "num-lanes property not found\n");
+		return;
+	}
+
+	/* Set the number of lanes */
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+	val &= ~PORT_LINK_MODE_MASK;
+	switch (lanes) {
+	case 1:
+		val |= PORT_LINK_MODE_1_LANES;
+		break;
+	case 2:
+		val |= PORT_LINK_MODE_2_LANES;
+		break;
+	case 4:
+		val |= PORT_LINK_MODE_4_LANES;
+		break;
+	case 8:
+		val |= PORT_LINK_MODE_8_LANES;
+		break;
+	default:
+		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+		return;
+	}
+	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+	/* Set link width speed control register */
+	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+	switch (lanes) {
+	case 1:
+		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+		break;
+	case 2:
+		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+		break;
+	case 4:
+		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+		break;
+	case 8:
+		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+		break;
+	}
+	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+		       PCIE_PL_CHK_REG_CHK_REG_START;
+		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+	}
+}
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware.h b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.h
new file mode 100644
index 0000000..5a18e94
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES		10
+#define LINK_WAIT_USLEEP_MIN		90000
+#define LINK_WAIT_USLEEP_MAX		100000
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES	5
+#define LINK_WAIT_IATU			9
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_LINK_CONTROL		0x710
+#define PORT_LINK_MODE_MASK		GENMASK(21, 16)
+#define PORT_LINK_MODE(n)		FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES		PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES		PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES		PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES		PORT_LINK_MODE(0xf)
+
+#define PCIE_PORT_DEBUG0		0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK	0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0	0x11
+#define PCIE_PORT_DEBUG1		0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP		BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING	BIT(29)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
+#define PORT_LOGIC_SPEED_CHANGE		BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK	GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n)	FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES	PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES	PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES	PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES	PORT_LOGIC_LINK_WIDTH(0x8)
+
+#define PCIE_MSI_ADDR_LO		0x820
+#define PCIE_MSI_ADDR_HI		0x824
+#define PCIE_MSI_INTR0_ENABLE		0x828
+#define PCIE_MSI_INTR0_MASK		0x82C
+#define PCIE_MSI_INTR0_STATUS		0x830
+
+#define PCIE_ATU_VIEWPORT		0x900
+#define PCIE_ATU_REGION_INBOUND		BIT(31)
+#define PCIE_ATU_REGION_OUTBOUND	0
+#define PCIE_ATU_REGION_INDEX2		0x2
+#define PCIE_ATU_REGION_INDEX1		0x1
+#define PCIE_ATU_REGION_INDEX0		0x0
+#define PCIE_ATU_CR1			0x904
+#define PCIE_ATU_TYPE_MEM		0x0
+#define PCIE_ATU_TYPE_IO		0x2
+#define PCIE_ATU_TYPE_CFG0		0x4
+#define PCIE_ATU_TYPE_CFG1		0x5
+#define PCIE_ATU_CR2			0x908
+#define PCIE_ATU_ENABLE			BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE	BIT(30)
+#define PCIE_ATU_LOWER_BASE		0x90C
+#define PCIE_ATU_UPPER_BASE		0x910
+#define PCIE_ATU_LIMIT			0x914
+#define PCIE_ATU_LOWER_TARGET		0x918
+#define PCIE_ATU_BUS(x)			FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x)			FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x)		FIELD_PREP(GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET		0x91C
+
+#define PCIE_MISC_CONTROL_1_OFF		0x8BC
+#define PCIE_DBI_RO_WR_EN		BIT(0)
+
+#define PCIE_PL_CHK_REG_CONTROL_STATUS			0xB20
+#define PCIE_PL_CHK_REG_CHK_REG_START			BIT(0)
+#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS		BIT(1)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR	BIT(16)
+#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR		BIT(17)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE		BIT(18)
+
+#define PCIE_PL_CHK_REG_ERR_ADDR			0xB28
+
+/*
+ * iATU Unroll-specific register definitions
+ * From core version 4.80 onwards, address translation is done through
+ * the unrolled iATU registers
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1	0x00
+#define PCIE_ATU_UNR_REGION_CTRL2	0x04
+#define PCIE_ATU_UNR_LOWER_BASE		0x08
+#define PCIE_ATU_UNR_UPPER_BASE		0x0C
+#define PCIE_ATU_UNR_LIMIT		0x10
+#define PCIE_ATU_UNR_LOWER_TARGET	0x14
+#define PCIE_ATU_UNR_UPPER_TARGET	0x18
+
+/*
+ * The default address offset between dbi_base and atu_base. Controller
+ * drivers are not required to initialize atu_base: when it is left unset,
+ * the driver core derives it from dbi_base using this offset.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
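+/* i.e. atu_base defaults to dbi_base + 0x300000 when left unset */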
+
+/* Register address builder */
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
+		((region) << 9)
+
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+		(((region) << 9) | BIT(8))
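+/*
+ * Each region owns a 0x200-byte register window at (region << 9); outbound
+ * registers sit at offset 0x0 within it, inbound ones at offset 0x100.
+ */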
+
+#define MAX_MSI_IRQS			256
+#define MAX_MSI_IRQS_PER_CTRL		32
+#define MAX_MSI_CTRLS			(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE		12
+#define MSI_DEF_NUM_VECTORS		32
+
+/* Maximum number of inbound/outbound iATUs */
+#define MAX_IATU_IN			256
+#define MAX_IATU_OUT			256
+
+struct pcie_port;
+struct dw_pcie;
+struct dw_pcie_ep;
+
+enum dw_pcie_region_type {
+	DW_PCIE_REGION_UNKNOWN,
+	DW_PCIE_REGION_INBOUND,
+	DW_PCIE_REGION_OUTBOUND,
+};
+
+enum dw_pcie_device_mode {
+	DW_PCIE_UNKNOWN_TYPE,
+	DW_PCIE_EP_TYPE,
+	DW_PCIE_LEG_EP_TYPE,
+	DW_PCIE_RC_TYPE,
+};
+
+struct dw_pcie_host_ops {
+	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+	int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+	int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 *val);
+	int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 val);
+	int (*host_init)(struct pcie_port *pp);
+	void (*scan_bus)(struct pcie_port *pp);
+	void (*set_num_vectors)(struct pcie_port *pp);
+	int (*msi_host_init)(struct pcie_port *pp);
+};
+
+struct pcie_port {
+	u8			root_bus_nr;
+	u64			cfg0_base;
+	void __iomem		*va_cfg0_base;
+	u32			cfg0_size;
+	u64			cfg1_base;
+	void __iomem		*va_cfg1_base;
+	u32			cfg1_size;
+	resource_size_t		io_base;
+	phys_addr_t		io_bus_addr;
+	u32			io_size;
+	u64			mem_base;
+	phys_addr_t		mem_bus_addr;
+	u32			mem_size;
+	struct resource		*cfg;
+	struct resource		*io;
+	struct resource		*mem;
+	struct resource		*busn;
+	int			irq;
+	const struct dw_pcie_host_ops *ops;
+	int			msi_irq;
+	struct irq_domain	*irq_domain;
+	struct irq_domain	*msi_domain;
+	dma_addr_t		msi_data;
+	struct page		*msi_page;
+	struct irq_chip		*msi_irq_chip;
+	u32			num_vectors;
+	u32			irq_mask[MAX_MSI_CTRLS];
+	struct pci_bus		*root_bus;
+	raw_spinlock_t		lock;
+	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+};
+
+enum dw_pcie_as_type {
+	DW_PCIE_AS_UNKNOWN,
+	DW_PCIE_AS_MEM,
+	DW_PCIE_AS_IO,
+};
+
+struct dw_pcie_ep_ops {
+	void	(*ep_init)(struct dw_pcie_ep *ep);
+	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
+			     enum pci_epc_irq_type type, u16 interrupt_num);
+	const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+};
+
+struct dw_pcie_ep {
+	struct pci_epc		*epc;
+	const struct dw_pcie_ep_ops *ops;
+	phys_addr_t		phys_base;
+	size_t			addr_size;
+	size_t			page_size;
+	u8			bar_to_atu[6];
+	phys_addr_t		*outbound_addr;
+	unsigned long		*ib_window_map;
+	unsigned long		*ob_window_map;
+	u32			num_ib_windows;
+	u32			num_ob_windows;
+	void __iomem		*msi_mem;
+	phys_addr_t		msi_mem_phys;
+	u8			msi_cap;	/* MSI capability offset */
+	u8			msix_cap;	/* MSI-X capability offset */
+};
+
+struct dw_pcie_ops {
+	u64	(*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+	u32	(*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			    size_t size);
+	void	(*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			     size_t size, u32 val);
+	u32     (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			     size_t size);
+	void    (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			      size_t size, u32 val);
+	int	(*link_up)(struct dw_pcie *pcie);
+	int	(*start_link)(struct dw_pcie *pcie);
+	void	(*stop_link)(struct dw_pcie *pcie);
+};
+
+struct dw_pcie {
+	struct device		*dev;
+	void __iomem		*dbi_base;
+	void __iomem		*dbi_base2;
+	/* Used when iatu_unroll_enabled is true */
+	void __iomem		*atu_base;
+	u32			num_viewport;
+	u8			iatu_unroll_enabled;
+	struct pcie_port	pp;
+	struct dw_pcie_ep	ep;
+	const struct dw_pcie_ops *ops;
+	unsigned int		version;
+};
+
+#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
+
+#define to_dw_pcie_from_ep(endpoint)   \
+		container_of((endpoint), struct dw_pcie, ep)
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val);
+int dw_pcie_write(void __iomem *addr, int size, u32 val);
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+int dw_pcie_link_up(struct dw_pcie *pci);
+int dw_pcie_wait_for_link(struct dw_pcie *pci);
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
+			       int type, u64 cpu_addr, u64 pci_addr,
+			       u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+			     u64 cpu_addr, enum dw_pcie_as_type as_type);
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+			 enum dw_pcie_region_type type);
+void dw_pcie_setup(struct dw_pcie *pci);
+
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	dw_pcie_write_dbi(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+	dw_pcie_write_dbi(pci, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi(pci, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+	dw_pcie_write_dbi(pci, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi(pci, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	dw_pcie_write_dbi2(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi2(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	dw_pcie_write_atu(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_atu(pci, reg, 0x4);
+}
+
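+/* Unlock normally read-only DBI registers (class code, BARs, ...) */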
+static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 val;
+
+	reg = PCIE_MISC_CONTROL_1_OFF;
+	val = dw_pcie_readl_dbi(pci, reg);
+	val |= PCIE_DBI_RO_WR_EN;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 val;
+
+	reg = PCIE_MISC_CONTROL_1_OFF;
+	val = dw_pcie_readl_dbi(pci, reg);
+	val &= ~PCIE_DBI_RO_WR_EN;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
+
+#ifdef CONFIG_PCIE_DW_HOST
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
+void dw_pcie_free_msi(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+void dw_pcie_host_deinit(struct pcie_port *pp);
+int dw_pcie_allocate_domains(struct pcie_port *pp);
+#else
+static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+{
+	return IRQ_NONE;
+}
+
+static inline void dw_pcie_msi_init(struct pcie_port *pp)
+{
+}
+
+static inline void dw_pcie_free_msi(struct pcie_port *pp)
+{
+}
+
+static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+{
+}
+
+static inline int dw_pcie_host_init(struct pcie_port *pp)
+{
+	return 0;
+}
+
+static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+}
+
+static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u16 interrupt_num);
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	return 0;
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	return 0;
+}
+
+static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+					   u8 interrupt_num)
+{
+	return 0;
+}
+
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+					   u16 interrupt_num)
+{
+	return 0;
+}
+
+static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+}
+#endif
+#endif /* _PCIE_DESIGNWARE_H */
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-hisi.c b/marvell/linux/drivers/pci/controller/dwc/pcie-hisi.c
new file mode 100644
index 0000000..6d9e1b2
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-hisi.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon SoCs
+ *
+ * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Zhou Wang <wangzhou1@hisilicon.com>
+ *          Dacai Zhu <zhudacai@hisilicon.com>
+ *          Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/regmap.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			     int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int dev = PCI_SLOT(devfn);
+
+	if (bus->number == cfg->busr.start) {
+		/* access only one slot on each root port */
+		if (dev > 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		else
+			return pci_generic_config_read32(bus, devfn, where,
+							 size, val);
+	}
+
+	return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			     int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int dev = PCI_SLOT(devfn);
+
+	if (bus->number == cfg->busr.start) {
+		/* access only one slot on each root port */
+		if (dev > 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		else
+			return pci_generic_config_write32(bus, devfn, where,
+							  size, val);
+	}
+
+	return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				       int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	void __iomem *reg_base = cfg->priv;
+
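+	/*
+	 * Root bus config accesses target the RC's own register block
+	 * (cfg->priv) rather than the ECAM window.
+	 */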
+	if (bus->number == cfg->busr.start)
+		return reg_base + where;
+	else
+		return pci_ecam_map_bus(bus, devfn, where);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct resource *res;
+	void __iomem *reg_base;
+	int ret;
+
+	/*
+	 * Retrieve RC base and size from a HISI0081 device with _UID
+	 * matching our segment.
+	 */
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+	if (ret) {
+		dev_err(dev, "can't get rc base address\n");
+		return -ENOMEM;
+	}
+
+	reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+	if (!reg_base)
+		return -ENOMEM;
+
+	cfg->priv = reg_base;
+	return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_ops = {
+	.bus_shift    = 20,
+	.init         =  hisi_pcie_init,
+	.pci_ops      = {
+		.map_bus    = hisi_pcie_map_bus,
+		.read       = hisi_pcie_rd_conf,
+		.write      = hisi_pcie_wr_conf,
+	}
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
+
+#include "pcie-designware.h"
+
+#define PCIE_SUBCTRL_SYS_STATE4_REG		0x6818
+#define PCIE_HIP06_CTRL_OFF			0x1000
+#define PCIE_SYS_STATE4				(PCIE_HIP06_CTRL_OFF + 0x31c)
+#define PCIE_LTSSM_LINKUP_STATE			0x11
+#define PCIE_LTSSM_STATE_MASK			0x3F
+
+#define to_hisi_pcie(x)	dev_get_drvdata((x)->dev)
+
+struct hisi_pcie;
+
+struct pcie_soc_ops {
+	int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
+};
+
+struct hisi_pcie {
+	struct dw_pcie *pci;
+	struct regmap *subctrl;
+	u32 port_id;
+	const struct pcie_soc_ops *soc_ops;
+};
+
+/* HipXX PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
+			      u32 *val)
+{
+	u32 reg;
+	u32 reg_val;
+	void *walker = &reg_val;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
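+	/*
+	 * Emulate sub-dword reads: fetch the aligned dword over the DBI
+	 * and extract the addressed byte/word via the walker pointer.
+	 */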
+	walker += (where & 0x3);
+	reg = where & ~0x3;
+	reg_val = dw_pcie_readl_dbi(pci, reg);
+
+	if (size == 1)
+		*val = *(u8 __force *) walker;
+	else if (size == 2)
+		*val = *(u16 __force *) walker;
+	else if (size == 4)
+		*val = reg_val;
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* HipXX PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int  size,
+				u32 val)
+{
+	u32 reg_val;
+	u32 reg;
+	void *walker = &reg_val;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
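+	/*
+	 * Sub-dword writes become read-modify-write of the aligned dword,
+	 * patched through the walker pointer.
+	 */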
+	walker += (where & 0x3);
+	reg = where & ~0x3;
+	if (size == 4)
+		dw_pcie_writel_dbi(pci, reg, val);
+	else if (size == 2) {
+		reg_val = dw_pcie_readl_dbi(pci, reg);
+		*(u16 __force *) walker = val;
+		dw_pcie_writel_dbi(pci, reg, reg_val);
+	} else if (size == 1) {
+		reg_val = dw_pcie_readl_dbi(pci, reg);
+		*(u8 __force *) walker = val;
+		dw_pcie_writel_dbi(pci, reg, reg_val);
+	} else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
+{
+	u32 val;
+
+	regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
+		    0x100 * hisi_pcie->port_id, &val);
+
+	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
+}
+
+static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
+{
+	struct dw_pcie *pci = hisi_pcie->pci;
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4);
+
+	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
+}
+
+static int hisi_pcie_link_up(struct dw_pcie *pci)
+{
+	struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci);
+
+	return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
+}
+
+static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
+	.rd_own_conf = hisi_pcie_cfg_read,
+	.wr_own_conf = hisi_pcie_cfg_write,
+};
+
+static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
+			      struct platform_device *pdev)
+{
+	struct dw_pcie *pci = hisi_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+	u32 port_id;
+
+	if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
+		dev_err(dev, "failed to read port-id\n");
+		return -EINVAL;
+	}
+	if (port_id > 3) {
+		dev_err(dev, "Invalid port-id: %d\n", port_id);
+		return -EINVAL;
+	}
+	hisi_pcie->port_id = port_id;
+
+	pp->ops = &hisi_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = hisi_pcie_link_up,
+};
+
+static int hisi_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct hisi_pcie *hisi_pcie;
+	struct resource *reg;
+	int ret;
+
+	hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
+	if (!hisi_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	hisi_pcie->pci = pci;
+
+	hisi_pcie->soc_ops = of_device_get_match_data(dev);
+
+	hisi_pcie->subctrl =
+	    syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
+	if (IS_ERR(hisi_pcie->subctrl)) {
+		dev_err(dev, "cannot get subctrl base\n");
+		return PTR_ERR(hisi_pcie->subctrl);
+	}
+
+	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+	platform_set_drvdata(pdev, hisi_pcie);
+
+	ret = hisi_add_pcie_port(hisi_pcie, pdev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct pcie_soc_ops hip05_ops = {
+		&hisi_pcie_link_up_hip05
+};
+
+static struct pcie_soc_ops hip06_ops = {
+		&hisi_pcie_link_up_hip06
+};
+
+static const struct of_device_id hisi_pcie_of_match[] = {
+	{
+			.compatible = "hisilicon,hip05-pcie",
+			.data	    = (void *) &hip05_ops,
+	},
+	{
+			.compatible = "hisilicon,hip06-pcie",
+			.data	    = (void *) &hip06_ops,
+	},
+	{},
+};
+
+static struct platform_driver hisi_pcie_driver = {
+	.probe  = hisi_pcie_probe,
+	.driver = {
+		   .name = "hisi-pcie",
+		   .of_match_table = hisi_pcie_of_match,
+		   .suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(hisi_pcie_driver);
+
+static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pci_ecam_ops *ops;
+
+	ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
+	return pci_host_common_probe(pdev, ops);
+}
+
+static int hisi_pcie_platform_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res;
+	void __iomem *reg_base;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		dev_err(dev, "missing \"reg[1]\"property\n");
+		return -EINVAL;
+	}
+
+	reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+	if (!reg_base)
+		return -ENOMEM;
+
+	cfg->priv = reg_base;
+	return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_platform_ops = {
+	.bus_shift    = 20,
+	.init         =  hisi_pcie_platform_init,
+	.pci_ops      = {
+		.map_bus    = hisi_pcie_map_bus,
+		.read       = hisi_pcie_rd_conf,
+		.write      = hisi_pcie_wr_conf,
+	}
+};
+
+static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
+	{
+		.compatible =  "hisilicon,hip06-pcie-ecam",
+		.data	    = (void *) &hisi_pcie_platform_ops,
+	},
+	{
+		.compatible =  "hisilicon,hip07-pcie-ecam",
+		.data       = (void *) &hisi_pcie_platform_ops,
+	},
+	{},
+};
+
+static struct platform_driver hisi_pcie_almost_ecam_driver = {
+	.probe  = hisi_pcie_almost_ecam_probe,
+	.driver = {
+		   .name = "hisi-pcie-almost-ecam",
+		   .of_match_table = hisi_pcie_almost_ecam_of_match,
+		   .suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(hisi_pcie_almost_ecam_driver);
+
+#endif
+#endif
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-histb.c b/marvell/linux/drivers/pci/controller/dwc/pcie-histb.c
new file mode 100644
index 0000000..811b5c6
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-histb.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ *          Jianguo Sun <sunjianguo1@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x)	dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0			0x0000
+#define PCIE_SYS_CTRL1			0x0004
+#define PCIE_SYS_CTRL7			0x001C
+#define PCIE_SYS_CTRL13			0x0034
+#define PCIE_SYS_CTRL15			0x003C
+#define PCIE_SYS_CTRL16			0x0040
+#define PCIE_SYS_CTRL17			0x0044
+
+#define PCIE_SYS_STAT0			0x0100
+#define PCIE_SYS_STAT4			0x0110
+
+#define PCIE_RDLH_LINK_UP		BIT(5)
+#define PCIE_XMLH_LINK_UP		BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
+#define PCIE_APP_LTSSM_ENABLE		BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK		GENMASK(31, 28)
+#define PCIE_WM_EP			0
+#define PCIE_WM_LEGACY			BIT(1)
+#define PCIE_WM_RC			BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK		GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE		0x11
+
+struct histb_pcie {
+	struct dw_pcie *pci;
+	struct clk *aux_clk;
+	struct clk *pipe_clk;
+	struct clk *sys_clk;
+	struct clk *bus_clk;
+	struct phy *phy;
+	struct reset_control *soft_reset;
+	struct reset_control *sys_reset;
+	struct reset_control *bus_reset;
+	void __iomem *ctrl;
+	int reset_gpio;
+	struct regulator *vpcie;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+	return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+	writel(val, histb_pcie->ctrl + reg);
+}
+
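+/*
+ * DBI accesses are gated: PCIE_ELBI_SLV_DBI_ENABLE must be set around
+ * each write (CTRL0) or read (CTRL1) and cleared again afterwards.
+ */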
+static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 val;
+
+	val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+	if (enable)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 val;
+
+	val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+	if (enable)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+			       u32 reg, size_t size)
+{
+	u32 val;
+
+	histb_pcie_dbi_r_mode(&pci->pp, true);
+	dw_pcie_read(base + reg, size, &val);
+	histb_pcie_dbi_r_mode(&pci->pp, false);
+
+	return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				 u32 reg, size_t size, u32 val)
+{
+	histb_pcie_dbi_w_mode(&pci->pp, true);
+	dw_pcie_write(base + reg, size, val);
+	histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
+				  int size, u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
+
+	histb_pcie_dbi_r_mode(pp, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	histb_pcie_dbi_r_mode(pp, false);
+
+	return ret;
+}
+
+static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
+				  int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
+
+	histb_pcie_dbi_w_mode(pp, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	histb_pcie_dbi_w_mode(pp, false);
+
+	return ret;
+}
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 regval;
+	u32 status;
+
+	regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+	status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+	status &= PCIE_LTSSM_STATE_MASK;
+	if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+	    (status == PCIE_LTSSM_STATE_ACTIVE))
+		return 1;
+
+	return 0;
+}
+
+static int histb_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 regval;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_info(pci->dev, "Link already up\n");
+		return 0;
+	}
+
+	/* PCIe RC work mode */
+	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+	regval &= ~PCIE_DEVICE_TYPE_MASK;
+	regval |= PCIE_WM_RC;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+	/* setup root complex */
+	dw_pcie_setup_rc(pp);
+
+	/* assert LTSSM enable */
+	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+	regval |= PCIE_APP_LTSSM_ENABLE;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static int histb_pcie_host_init(struct pcie_port *pp)
+{
+	histb_pcie_establish_link(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
+	.rd_own_conf = histb_pcie_rd_own_conf,
+	.wr_own_conf = histb_pcie_wr_own_conf,
+	.host_init = histb_pcie_host_init,
+};
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+	reset_control_assert(hipcie->soft_reset);
+	reset_control_assert(hipcie->sys_reset);
+	reset_control_assert(hipcie->bus_reset);
+
+	clk_disable_unprepare(hipcie->aux_clk);
+	clk_disable_unprepare(hipcie->pipe_clk);
+	clk_disable_unprepare(hipcie->sys_clk);
+	clk_disable_unprepare(hipcie->bus_clk);
+
+	if (gpio_is_valid(hipcie->reset_gpio))
+		gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+
+	if (hipcie->vpcie)
+		regulator_disable(hipcie->vpcie);
+}
+
+static int histb_pcie_host_enable(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	struct device *dev = pci->dev;
+	int ret;
+
+	/* Power on the PCIe device, if a supply regulator is present */
+	if (hipcie->vpcie) {
+		ret = regulator_enable(hipcie->vpcie);
+		if (ret) {
+			dev_err(dev, "failed to enable regulator: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (gpio_is_valid(hipcie->reset_gpio))
+		gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+
+	ret = clk_prepare_enable(hipcie->bus_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable bus clk\n");
+		goto err_bus_clk;
+	}
+
+	ret = clk_prepare_enable(hipcie->sys_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable sys clk\n");
+		goto err_sys_clk;
+	}
+
+	ret = clk_prepare_enable(hipcie->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clk\n");
+		goto err_pipe_clk;
+	}
+
+	ret = clk_prepare_enable(hipcie->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clk\n");
+		goto err_aux_clk;
+	}
+
+	reset_control_assert(hipcie->soft_reset);
+	reset_control_deassert(hipcie->soft_reset);
+
+	reset_control_assert(hipcie->sys_reset);
+	reset_control_deassert(hipcie->sys_reset);
+
+	reset_control_assert(hipcie->bus_reset);
+	reset_control_deassert(hipcie->bus_reset);
+
+	return 0;
+
+err_aux_clk:
+	clk_disable_unprepare(hipcie->pipe_clk);
+err_pipe_clk:
+	clk_disable_unprepare(hipcie->sys_clk);
+err_sys_clk:
+	clk_disable_unprepare(hipcie->bus_clk);
+err_bus_clk:
+	if (hipcie->vpcie)
+		regulator_disable(hipcie->vpcie);
+
+	return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.read_dbi = histb_pcie_read_dbi,
+	.write_dbi = histb_pcie_write_dbi,
+	.link_up = histb_pcie_link_up,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+	struct histb_pcie *hipcie;
+	struct dw_pcie *pci;
+	struct pcie_port *pp;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	enum of_gpio_flags of_flags;
+	unsigned long flag = GPIOF_DIR_OUT;
+	int ret;
+
+	hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+	if (!hipcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	hipcie->pci = pci;
+	pp = &pci->pp;
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+	hipcie->ctrl = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hipcie->ctrl)) {
+		dev_err(dev, "cannot get control reg base\n");
+		return PTR_ERR(hipcie->ctrl);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "cannot get rc-dbi base\n");
+		return PTR_ERR(pci->dbi_base);
+	}
+
+	hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+	if (IS_ERR(hipcie->vpcie)) {
+		if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+			return PTR_ERR(hipcie->vpcie);
+		hipcie->vpcie = NULL;
+	}
+
+	hipcie->reset_gpio = of_get_named_gpio_flags(np,
+				"reset-gpios", 0, &of_flags);
+	if (of_flags & OF_GPIO_ACTIVE_LOW)
+		flag |= GPIOF_ACTIVE_LOW;
+	if (gpio_is_valid(hipcie->reset_gpio)) {
+		ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
+				flag, "PCIe device power control");
+		if (ret) {
+			dev_err(dev, "unable to request gpio\n");
+			return ret;
+		}
+	}
+
+	hipcie->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(hipcie->aux_clk)) {
+		dev_err(dev, "Failed to get PCIe aux clk\n");
+		return PTR_ERR(hipcie->aux_clk);
+	}
+
+	hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+	if (IS_ERR(hipcie->pipe_clk)) {
+		dev_err(dev, "Failed to get PCIe pipe clk\n");
+		return PTR_ERR(hipcie->pipe_clk);
+	}
+
+	hipcie->sys_clk = devm_clk_get(dev, "sys");
+	if (IS_ERR(hipcie->sys_clk)) {
+		dev_err(dev, "Failed to get PCIEe sys clk\n");
+		return PTR_ERR(hipcie->sys_clk);
+	}
+
+	hipcie->bus_clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(hipcie->bus_clk)) {
+		dev_err(dev, "Failed to get PCIe bus clk\n");
+		return PTR_ERR(hipcie->bus_clk);
+	}
+
+	hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+	if (IS_ERR(hipcie->soft_reset)) {
+		dev_err(dev, "couldn't get soft reset\n");
+		return PTR_ERR(hipcie->soft_reset);
+	}
+
+	hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+	if (IS_ERR(hipcie->sys_reset)) {
+		dev_err(dev, "couldn't get sys reset\n");
+		return PTR_ERR(hipcie->sys_reset);
+	}
+
+	hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+	if (IS_ERR(hipcie->bus_reset)) {
+		dev_err(dev, "couldn't get bus reset\n");
+		return PTR_ERR(hipcie->bus_reset);
+	}
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "Failed to get MSI IRQ\n");
+			return pp->msi_irq;
+		}
+	}
+
+	hipcie->phy = devm_phy_get(dev, "phy");
+	if (IS_ERR(hipcie->phy)) {
+		dev_info(dev, "no pcie-phy found\n");
+		hipcie->phy = NULL;
+		/*
+		 * Fall through: with no pcie-phy, PHY initialization
+		 * must already have been done by the bootloader.
+		 */
+	} else {
+		phy_init(hipcie->phy);
+	}
+
+	pp->ops = &histb_pcie_host_ops;
+
+	platform_set_drvdata(pdev, hipcie);
+
+	ret = histb_pcie_host_enable(pp);
+	if (ret) {
+		dev_err(dev, "failed to enable host\n");
+		return ret;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int histb_pcie_remove(struct platform_device *pdev)
+{
+	struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+	histb_pcie_host_disable(hipcie);
+
+	if (hipcie->phy)
+		phy_exit(hipcie->phy);
+
+	return 0;
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+	{ .compatible = "hisilicon,hi3798cv200-pcie", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+	.probe	= histb_pcie_probe,
+	.remove	= histb_pcie_remove,
+	.driver = {
+		.name = "histb-pcie",
+		.of_match_table = histb_pcie_of_match,
+	},
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-kestrel.c b/marvell/linux/drivers/pci/controller/dwc/pcie-kestrel.c
new file mode 100644
index 0000000..c8b367a
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-kestrel.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for ASR ASR1901 SoCs
+ *
+ * ASR1901 PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2022 ASR Technology Group Ltd.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/pm_qos.h>
+#include <linux/cputype.h>
+#include <soc/asr/regs-addr.h>
+#include "pcie-designware.h"
+
+#define DEVICE_NAME "ASR1901 PCIe Host"
+
+#define APMU_PCIE_CLK_RES_CTRL		0x3CC
+#define APMU_PCIE_CTRL_LOGIC		0x3D0
+#define APMU_PCIE2_CLK_RES_CTRL		0x3E4
+#define APMU_PCIE2_CTRL_LOGIC		0x3E8
+#define APMU_USB3PHY0_CTRL0		0x3B8
+
+#define HSIO_RC_R_CAL_STATUS		(0x15c)
+#define PCIE_CAL_DONE			(0x3<<14)
+
+#define LANE1_OFFSET			0x400
+#define	LTSSM_EN			(0x1 << 6)
+#define	APP_HOLD_PHY_RST		(0x1 << 30)
+#define	DEVICE_TYPE_RC			(0x1 << 31)	/* bit 31: 0 = EP, 1 = RC */
+
+/* PCIe Config registers */
+/* PCIe controller wrapper ASR configuration registers */
+#define	PCIE_AHB_IRQ			0x0000
+#define	IRQ_EN				0x1
+
+#define	PCIE_AHB_LINK_STS		0x0004
+#define DLL_LINK_UP			(0x1 << 12)
+#define PHY_LINK_UP			(0x1 << 1)
+#define LTSSM_L0			(0x11 << 6)
+#define LTSSM_STS			(0x3f << 6)
+
+#define PCIE_AHB_LEGACY_INT		0x0008
+#define PLL_READY			(0x1)
+
+#define	PCIE_AHB_IRQENABLE_SET_INTX	0x000c
+#define	INTA				(0x1 << 6)
+#define	INTB				(0x1 << 7)
+#define	INTC				(0x1 << 8)
+#define	INTD				(0x1 << 9)
+#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+#define INTX_MASK			GENMASK(9, 6)
+#define INTX_SHIFT			6
+
+#define	PCIE_AHB_IRQSTATUS		0x0010
+#define	PCIE_AHB_IRQENABLE_SET		0x0014
+#define	MSI_INT				(0x1 << 11)
+#define	DLL_LINK_INT			(0x1 << 20)
+
+/* PCIe PHY registers */
+#define PUPHY_CLK_CFG			0x8
+#define PUPHY_MODE_CFG			0x0c
+#define PUPHY_ERROR_STATUS		0x10
+#define PUPHY_OVERRIDE			0x18
+#define PUPHY_RC_REG			0x44
+#define PUPHY_PCIE3X2_REG		0x50
+#define PUPHY_PLL_REG1			0x58
+#define PUPHY_PLL_REG2			0x5c
+#define PUPHY_RX_REG1			0x60
+#define PUPHY_TX_REG			0x74
+#define PUPHY_TEST_REG			0x78
+#define PUPHY_TEST_INFO			0x84
+
+#define OVRD_MPU_U3			(0x1 << 17)
+#define CFG_MPU_U3			(0x1 << 16)
+#define OVRD_PU_RX_LFPS			(0x1 << 15)
+
+#define LINK_WAIT_MIN			900
+#define LINK_WAIT_MAX			1000
+/* Time for delay */
+#define REF_PERST_MIN			20000
+#define REF_PERST_MAX			25000
+#define PERST_ACCESS_MIN		10000
+#define PERST_ACCESS_MAX		12000
+
+#define to_kst_pcie(x)	dev_get_drvdata((x)->dev)
+
+struct kst_pcie {
+	struct device		*dev;
+	struct dw_pcie		*pci;
+	void __iomem		*phy_base;
+	void __iomem		*app_base;
+	void __iomem		*usb3_base;
+	struct clk		*clk;
+	struct phy		*phy[2];
+	unsigned int		phy_count;
+	unsigned int		slot;
+	unsigned int		lanes;
+	void __iomem		*pcie_pmua_reg;
+	s32			lpm_qos;
+	int			irq;
+	int			gpio_reset;
+	struct pm_qos_request	qos_idle;
+};
+
+extern u32 usb31_rterm_cal_value;
+
+static inline void kst_phy_writel(struct kst_pcie *pcie, u32 val, u32 reg)
+{
+	writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 kst_phy_readl(struct kst_pcie *pcie, u32 reg)
+{
+	return readl(pcie->phy_base + reg);
+}
+
+static inline void kst_app_writel(struct kst_pcie *pcie, u32 val, u32 reg)
+{
+	writel(val, pcie->app_base + reg);
+}
+
+static inline u32 kst_app_readl(struct kst_pcie *pcie, u32 reg)
+{
+	return readl(pcie->app_base + reg);
+}
+
+#ifndef CONFIG_USB_DWC3
+static inline void kst_usb30phy_writel(struct kst_pcie *pcie, u32 val, u32 reg)
+{
+	writel(val, pcie->usb3_base + reg);
+}
+
+static inline u32 kst_usb30phy_readl(struct kst_pcie *pcie, u32 reg)
+{
+	return readl(pcie->usb3_base + reg);
+}
+
+static int rterm_val, cali_done;
+#define USB_CALI_TIMEOUT 50000
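+/*
+ * Perform the USB3 PHY rterm calibration once and latch the result in
+ * rterm_val so kst_pcie_init_phy() can program the PCIe PHY with it.
+ * Subsequent calls return immediately (cali_done guard).
+ */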
+static void kst_usb_cali_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val, timeout = 0;
+
+	if (cali_done == 1)
+		return;
+
+	cali_done = 1;
+	writel(0x1e00b, regs_addr_get_va(REGS_ADDR_APMU) + APMU_USB3PHY0_CTRL0);
+
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_PLL_REG2);
+	kst_usb30phy_writel(kst_pcie, val & ~(0x1 << 21), PUPHY_PLL_REG2);
+
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_usb30phy_writel(kst_pcie, val & 0xFFFFC0FF, PUPHY_PLL_REG1);
+
+	do {
+		val = kst_usb30phy_readl(kst_pcie, PUPHY_TEST_INFO);
+		udelay(10);
+		timeout++;
+		if (timeout > USB_CALI_TIMEOUT)
+			break;
+	} while (((val >> 24) & 0x1) == 0);
+
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_TEST_INFO);
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_TEST_INFO);
+	pr_debug("usb rterm = %08x\n", (val >> 8) & 0xFF);
+	writel(0xb, regs_addr_get_va(REGS_ADDR_APMU) + APMU_USB3PHY0_CTRL0);
+
+	rterm_val = val;
+}
+
+static void kst2_usb_cali_phy(void)
+{
+	u32 value;
+	int timeout = 0;
+	void __iomem *apbs_base = regs_addr_get_va(REGS_ADDR_APBS);
+
+	value = readl(apbs_base + HSIO_RC_R_CAL_STATUS);
+	value &= ~0x1;
+	writel(value, apbs_base + HSIO_RC_R_CAL_STATUS);
+
+	do {
+		value = readl(apbs_base + HSIO_RC_R_CAL_STATUS);
+		udelay(10);
+		timeout++;
+		if (timeout > USB_CALI_TIMEOUT) {
+			pr_err("PCIe PHY calibration timed out\n");
+			break;
+		}
+	} while ((value & PCIE_CAL_DONE) != PCIE_CAL_DONE);
+}
+
+#endif
+
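+/*
+ * Bring the PCIe PHY up: release the PMU reset and force RC mode,
+ * apply the per-lane overrides, program the rterm value from the USB3
+ * PHY calibration (on ASR1901 A0+), configure the PLL and poll until
+ * it reports ready.
+ */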
+static int kst_pcie_init_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val, data;
+	int count = 0;
+
+	/* release pcie reset and enable pcie axi clk */
+	__raw_writel(0x8000043f, kst_pcie->pcie_pmua_reg);
+	val = __raw_readl(kst_pcie->pcie_pmua_reg);
+	val |= DEVICE_TYPE_RC;
+	val &= ~APP_HOLD_PHY_RST;
+	__raw_writel(val, kst_pcie->pcie_pmua_reg);
+	/* enable the port0 DBI aclk when only port1 is in use */
+	if (kst_pcie->slot == 1) {
+		val = __raw_readl(regs_addr_get_va(REGS_ADDR_APMU)
+				  + APMU_PCIE_CLK_RES_CTRL);
+		if (!(val & 0x9)) {
+			val |= 0x80000009;
+			__raw_writel(val, regs_addr_get_va(REGS_ADDR_APMU)
+				     + APMU_PCIE_CLK_RES_CTRL);
+		}
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val |= (OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+		val |= (OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+		kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_RC_REG);
+	val = (val & 0xFFFF0000) | (0x1387);
+	kst_phy_writel(kst_pcie, val, PUPHY_RC_REG);
+
+	if (cpu_is_asr1901_a0_plus()) {
+#ifndef CONFIG_USB_DWC3
+		kst_usb_cali_phy(kst_pcie);
+		val = rterm_val;
+#else
+		val = usb31_rterm_cal_value;
+#endif
+		data = kst_phy_readl(kst_pcie, PUPHY_RC_REG);
+		data = (data & 0xffffff00) | ((val >> 8) & 0xFF);
+		kst_phy_writel(kst_pcie, data, PUPHY_RC_REG);
+		pr_debug("pcie rterm = %08x\n",
+			 kst_phy_readl(kst_pcie, PUPHY_RC_REG));
+	}
+#ifndef CONFIG_USB_DWC3
+	if (cpu_is_asr1906())
+		kst2_usb_cali_phy();
+#endif
+	val = kst_phy_readl(kst_pcie, PUPHY_TEST_REG);
+	val |= (0x1 << 1);
+	kst_phy_writel(kst_pcie, val, PUPHY_TEST_REG);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_TEST_REG + LANE1_OFFSET);
+		val |= (0x1 << 1);
+		kst_phy_writel(kst_pcie, val, PUPHY_TEST_REG + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+		val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+		kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_phy_writel(kst_pcie, val & 0xffff0fff, PUPHY_PLL_REG1);
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_phy_writel(kst_pcie, val | 0xC000, PUPHY_PLL_REG1);
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG2);
+	kst_phy_writel(kst_pcie, val | (0x1 << 20), PUPHY_PLL_REG2);
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG2);
+	kst_phy_writel(kst_pcie, val & ~(0x1 << 21), PUPHY_PLL_REG2);
+	kst_phy_writel(kst_pcie, 0x6505, PUPHY_PCIE3X2_REG);
+
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_phy_writel(kst_pcie, val & 0xf0ffffff, PUPHY_PLL_REG1);
+	val = kst_phy_readl(kst_pcie, PUPHY_CLK_CFG);
+	kst_phy_writel(kst_pcie, 0xB7c, PUPHY_CLK_CFG);
+
+	if (kst_pcie->lanes == 1) {
+		pr_debug("PCIe has only one lane, disabling lane 1\n");
+		val = kst_phy_readl(kst_pcie, PUPHY_MODE_CFG + LANE1_OFFSET);
+		kst_phy_writel(kst_pcie, val | (0x1 << 30),
+			       PUPHY_MODE_CFG + LANE1_OFFSET);
+	} else {
+		kst_phy_writel(kst_pcie, 0xB7c, PUPHY_CLK_CFG + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_MODE_CFG);
+	val |= (0x1 << 2);
+	kst_phy_writel(kst_pcie, val, PUPHY_MODE_CFG);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_MODE_CFG + LANE1_OFFSET);
+		val |= (0x1 << 2);
+		kst_phy_writel(kst_pcie, val, PUPHY_MODE_CFG + LANE1_OFFSET);
+	}
+
+	do {
+		val = kst_phy_readl(kst_pcie, PUPHY_CLK_CFG);
+		count++;
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		if (count == 100) {
+			pr_info(DEVICE_NAME ": wait for PLL ready timed out\n");
+			return -EINVAL;
+		}
+	} while ((val & PLL_READY) != PLL_READY);
+
+	return 0;
+}
+
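+/* Power down the PHY lanes by overriding the MPU_U3 power control. */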
+static int kst_pcie_disable_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val;
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val |= OVRD_MPU_U3;
+	val &= ~CFG_MPU_U3;
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+	val |= OVRD_MPU_U3;
+	val &= ~CFG_MPU_U3;
+
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+
+	return 0;
+}
+
+static int kst_pcie_reenable_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val;
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+		val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+		kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+	}
+
+	return 0;
+}
+
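+/* The link is up only when both the PHY and the data link layer report it. */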
+static int kst_pcie_link_up(struct dw_pcie *pci)
+{
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	u32 status = kst_app_readl(kst_pcie, PCIE_AHB_LINK_STS);
+
+	if ((status & DLL_LINK_UP) && (status & PHY_LINK_UP))
+		return 1;
+
+	return 0;
+}
+
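+/*
+ * Link bring-up: force RC mode, run the standard DesignWare RC setup,
+ * release the PHY reset and enable LTSSM, then poll until the LTSSM
+ * settles in L0 and the link reports up.
+ */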
+static int kst_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	struct device *dev = kst_pcie->pci->dev;
+	unsigned int val, count = 0;
+
+	if (kst_pcie_link_up(pci))
+		return 0;
+
+	val = __raw_readl(kst_pcie->pcie_pmua_reg);
+	__raw_writel(val | DEVICE_TYPE_RC, kst_pcie->pcie_pmua_reg);
+
+	dw_pcie_setup_rc(pp);
+#if 0
+	val = readl(pci->dbi_base + 0x8a8);
+	val &= ~(0xffff << 8);
+	val |= (0x10 << 8);		/* set TX preset P4 */
+	writel(val, pci->dbi_base + 0x8a8);
+#endif
+	/* Release app_hold_phy_reset and enable ltssm */
+	val = __raw_readl(kst_pcie->pcie_pmua_reg);
+	val |= LTSSM_EN;
+	val &= ~APP_HOLD_PHY_RST;
+	__raw_writel(val, kst_pcie->pcie_pmua_reg);
+
+	udelay(10);
+	val = readl(pci->dbi_base + 0x80c);
+	val |= 1 << 17;
+	writel(val, pci->dbi_base + 0x80c);
+	udelay(10);
+
+	do {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		val = kst_app_readl(kst_pcie, PCIE_AHB_LINK_STS);
+		pr_debug("%s, ltssm: 0x%x.\n", __func__, val);
+		count++;
+		if (count == 1000) {
+			pr_info(DEVICE_NAME ": failed to enter L0, ltssm: 0x%x\n", val);
+			return -EINVAL;
+		}
+	} while ((val & LTSSM_STS) != LTSSM_L0);
+
+	count = 0;
+	/* check if the link is up or not */
+	while (!kst_pcie_link_up(pci)) {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		count++;
+		if (count == 100) {
+			dev_err(dev, "Link Fail\n");
+			return -EINVAL;
+		}
+	}
+
+	val = readl(pci->dbi_base + 0x80);
+	pr_info(DEVICE_NAME ": %dx link negotiated (gen %d)\n",
+		(val >> 20) & 0xf, (val >> 16) & 0xf);
+
+	return 0;
+}
+
+static int kst_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	int ret;
+
+	ret = kst_pcie_establish_link(pp);
+	if (ret) {
+		__raw_writel(0x0, kst_pcie->pcie_pmua_reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops kst_pcie_host_ops = {
+	.host_init = kst_pcie_host_init,
+};
+
+#ifndef CONFIG_PCI_MSI
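+/* Legacy INTx handler: flag a wakeup event and ack the latched status. */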
+static irqreturn_t kst_pcie_irq_handler(int irq, void *arg)
+{
+	struct kst_pcie *kst_pcie = arg;
+	u32 status;
+
+	pm_wakeup_event(kst_pcie->dev, 2000);
+	status = kst_app_readl(kst_pcie, PCIE_AHB_LEGACY_INT);
+	kst_app_writel(kst_pcie, status, PCIE_AHB_LEGACY_INT);
+
+	return IRQ_HANDLED;
+}
+#endif
+#ifdef CONFIG_PCI_MSI
+static int kst_pcie_add_msi(struct dw_pcie *pci,
+				struct platform_device *pdev)
+{
+	int irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(&pdev->dev,
+				"failed to get MSI IRQ (%d)\n", irq);
+			return irq;
+		}
+		pci->pp.msi_irq = irq;
+	}
+
+	return 0;
+}
+#endif
+
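+/* Unmask the MSI or legacy INTx sources, then the top-level AHB IRQ. */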
+static void kst_pcie_enable_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	u32 val;
+
+#ifdef CONFIG_PCI_MSI
+	dw_pcie_msi_init(pp);
+
+	val = kst_app_readl(kst_pcie, PCIE_AHB_IRQENABLE_SET);
+	val |= MSI_INT;
+	kst_app_writel(kst_pcie, val, PCIE_AHB_IRQENABLE_SET);
+#else	/* legacy interrupt */
+	val = kst_app_readl(kst_pcie, PCIE_AHB_IRQENABLE_SET_INTX);
+	val |= LEG_EP_INTERRUPTS;
+	kst_app_writel(kst_pcie, val, PCIE_AHB_IRQENABLE_SET_INTX);
+#endif
+	val = kst_app_readl(kst_pcie, PCIE_AHB_IRQ);
+	val |= IRQ_EN;
+	kst_app_writel(kst_pcie, val, PCIE_AHB_IRQ);
+}
+
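+/*
+ * Request the MSI or legacy IRQ, register the root port with the
+ * DesignWare core, then unmask the interrupt sources.
+ */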
+static int kst_add_pcie_port(struct kst_pcie *pcie,
+				  struct platform_device *pdev)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &kst_pcie_host_ops;
+#ifdef CONFIG_PCI_MSI
+	ret = kst_pcie_add_msi(pci, pdev);
+	if (ret)
+		return ret;
+#else
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq for port\n");
+		return pp->irq;
+	}
+	ret = devm_request_irq(dev, pp->irq, kst_pcie_irq_handler,
+			       IRQF_SHARED, "kst-pcie", pcie);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+#endif
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host: %d\n", ret);
+		return ret;
+	}
+	kst_pcie_enable_interrupts(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = kst_pcie_link_up,
+};
+
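+/*
+ * Map the PHY/app and DBI register regions and parse the DT
+ * properties (num-lanes, lpm-qos, num-slot, reset-gpios).
+ */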
+static long kst_pcie_get_resource(struct kst_pcie *kst_pcie,
+				    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *phy;
+	struct resource *dbi;
+	struct device_node *np = pdev->dev.of_node;
+	const __be32 *prop;
+	int proplen;
+
+	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-phy");
+	kst_pcie->phy_base = devm_ioremap_resource(dev, phy);
+	if (IS_ERR(kst_pcie->phy_base))
+		return PTR_ERR(kst_pcie->phy_base);
+	kst_pcie->app_base = kst_pcie->phy_base + 0x10000;
+
+	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-dbi");
+	kst_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
+	if (IS_ERR(kst_pcie->pci->dbi_base))
+		return PTR_ERR(kst_pcie->pci->dbi_base);
+#ifndef CONFIG_USB_DWC3
+	if (cali_done == 0) {	/* with two ports, remap the usb3-phy region only once */
+		dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb3-phy");
+		kst_pcie->usb3_base = devm_ioremap_resource(dev, dbi);
+		if (IS_ERR(kst_pcie->usb3_base))
+			return PTR_ERR(kst_pcie->usb3_base);
+	}
+#endif
+	if (of_property_read_u32(np, "num-lanes", &(kst_pcie->lanes))) {
+		pr_err("Failed to parse the PCIE0 or PCIE1 lane number\n");
+		return -EINVAL;
+	}
+
+	prop = of_get_property(np, "lpm-qos", &proplen);
+	if (!prop) {
+		pr_err("lpm-qos config in DT for PCIe is not defined\n");
+		return -EINVAL;
+	} else
+		kst_pcie->lpm_qos = be32_to_cpup(prop);
+
+	if (of_property_read_u32(np, "num-slot", &(kst_pcie->slot))) {
+		pr_err("Failed to parse the PCIE0 or PCIE1\n");
+		return -EINVAL;
+	}
+	kst_pcie->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
+	if (kst_pcie->gpio_reset < 0)
+		return -ENODEV;
+
+	if (kst_pcie->slot == 0)
+		kst_pcie->pcie_pmua_reg = regs_addr_get_va(REGS_ADDR_APMU)
+					+ APMU_PCIE_CLK_RES_CTRL;
+	else if (kst_pcie->slot == 1)
+		kst_pcie->pcie_pmua_reg = regs_addr_get_va(REGS_ADDR_APMU)
+					+ APMU_PCIE2_CLK_RES_CTRL;
+
+	return 0;
+}
+
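+/*
+ * Probe: map resources, enable the bus clock, init the PHY, pulse
+ * PERST# to the endpoint, register the host bridge and pin the CPU
+ * idle state via a PM QoS request.
+ */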
+static int kst_pcie_probe(struct platform_device *pdev)
+{
+	struct dw_pcie *pci;
+	struct kst_pcie *pcie;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+	pcie->pci = pci;
+
+	ret = kst_pcie_get_resource(pcie, pdev);
+	if (ret)
+		return ret;
+
+	pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	ret = clk_prepare_enable(pcie->clk);
+	if (ret)
+		return ret;
+
+	ret = kst_pcie_init_phy(pcie);
+	if (ret)
+		goto fail_clk;
+
+	/* perst assert Endpoint */
+	if (!gpio_request(pcie->gpio_reset, "pcie_perst")) {
+		usleep_range(REF_PERST_MIN, REF_PERST_MAX);
+		ret = gpio_direction_output(pcie->gpio_reset, 1);
+		if (ret) {
+			pr_info(DEVICE_NAME "PCIE reset device failed.\r\n");
+			goto disable_phy;
+		}
+		gpio_free(pcie->gpio_reset);
+		usleep_range(PERST_ACCESS_MIN, PERST_ACCESS_MAX);
+	}
+	platform_set_drvdata(pdev, pcie);
+
+	ret = kst_add_pcie_port(pcie, pdev);
+	if (ret)
+		goto disable_phy;
+
+	device_init_wakeup(&pdev->dev, 1);
+	pm_qos_add_request(&pcie->qos_idle, PM_QOS_CPUIDLE_BLOCK,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+	pcie->qos_idle.name = pdev->name;
+	pm_qos_update_request(&pcie->qos_idle, pcie->lpm_qos);
+
+	return 0;
+
+disable_phy:
+	kst_pcie_disable_phy(pcie);
+fail_clk:
+	clk_disable_unprepare(pcie->clk);
+
+	return ret;
+}
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused kst_pcie_suspend_noirq(struct device *dev)
+{
+	struct kst_pcie *pcie = dev_get_drvdata(dev);
+
+	kst_pcie_disable_phy(pcie);
+	pm_qos_update_request(&pcie->qos_idle,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+
+	return 0;
+}
+
+static int __maybe_unused kst_pcie_resume_noirq(struct device *dev)
+{
+	struct kst_pcie *pcie = dev_get_drvdata(dev);
+
+	kst_pcie_reenable_phy(pcie);
+	pm_qos_update_request(&pcie->qos_idle, pcie->lpm_qos);
+
+	return 0;
+}
+
+static const struct dev_pm_ops kst_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(kst_pcie_suspend_noirq,
+				      kst_pcie_resume_noirq)
+};
+#endif
+
+static const struct of_device_id kst_pcie_of_match[] = {
+	{ .compatible = "asr,kst-pcie", },
+	{},
+};
+
+static struct platform_driver kst_pcie_driver = {
+	.probe		= kst_pcie_probe,
+	.driver = {
+		.name	= "kst-pcie",
+		.of_match_table = of_match_ptr(kst_pcie_of_match),
+		.suppress_bind_attrs = true,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &kst_pcie_pm_ops,
+#endif
+	},
+};
+
+static int __init kst_pcie_init(void)
+{
+	return platform_driver_probe(&kst_pcie_driver, kst_pcie_probe);
+}
+device_initcall_sync(kst_pcie_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-kirin.c b/marvell/linux/drivers/pci/controller/dwc/pcie-kirin.c
new file mode 100644
index 0000000..c19617a
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-kirin.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ *		http://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
+
+#define REF_CLK_FREQ			100000000
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR		0x000
+#define SOC_PCIECTRL_CTRL1_ADDR		0x004
+#define SOC_PCIEPHY_CTRL2_ADDR		0x008
+#define SOC_PCIEPHY_CTRL3_ADDR		0x00c
+#define PCIE_ELBI_SLV_DBI_ENABLE	(0x1 << 21)
+
+/* info located in APB */
+#define PCIE_APP_LTSSM_ENABLE	0x01c
+#define PCIE_APB_PHY_CTRL0	0x0
+#define PCIE_APB_PHY_CTRL1	0x4
+#define PCIE_APB_PHY_STATUS0	0x400
+#define PCIE_LINKUP_ENABLE	(0x8020)
+#define PCIE_LTSSM_ENABLE_BIT	(0x1 << 11)
+#define PIPE_CLK_STABLE		(0x1 << 19)
+#define PHY_REF_PAD_BIT		(0x1 << 8)
+#define PHY_PWR_DOWN_BIT	(0x1 << 22)
+#define PHY_RST_ACK_BIT		(0x1 << 16)
+
+/* info located in sysctrl */
+#define SCTRL_PCIE_CMOS_OFFSET	0x60
+#define SCTRL_PCIE_CMOS_BIT	0x10
+#define SCTRL_PCIE_ISO_OFFSET	0x44
+#define SCTRL_PCIE_ISO_BIT	0x30
+#define SCTRL_PCIE_HPCLK_OFFSET	0x190
+#define SCTRL_PCIE_HPCLK_BIT	0x184000
+#define SCTRL_PCIE_OE_OFFSET	0x14a
+#define PCIE_DEBOUNCE_PARAM	0xF0F400
+#define PCIE_OE_BYPASS		(0x3 << 28)
+
+/* peri_crg ctrl */
+#define CRGCTRL_PCIE_ASSERT_OFFSET	0x88
+#define CRGCTRL_PCIE_ASSERT_BIT		0x8c000000
+
+/* Time for delay */
+#define REF_2_PERST_MIN		20000
+#define REF_2_PERST_MAX		25000
+#define PERST_2_ACCESS_MIN	10000
+#define PERST_2_ACCESS_MAX	12000
+#define LINK_WAIT_MIN		900
+#define LINK_WAIT_MAX		1000
+#define PIPE_CLK_WAIT_MIN	550
+#define PIPE_CLK_WAIT_MAX	600
+#define TIME_CMOS_MIN		100
+#define TIME_CMOS_MAX		105
+#define TIME_PHY_PD_MIN		10
+#define TIME_PHY_PD_MAX		11
+
+struct kirin_pcie {
+	struct dw_pcie	*pci;
+	void __iomem	*apb_base;
+	void __iomem	*phy_base;
+	struct regmap	*crgctrl;
+	struct regmap	*sysctrl;
+	struct clk	*apb_sys_clk;
+	struct clk	*apb_phy_clk;
+	struct clk	*phy_ref_clk;
+	struct clk	*pcie_aclk;
+	struct clk	*pcie_aux_clk;
+	int		gpio_id_reset;
+};
+
+/* Registers in PCIeCTRL */
+static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
+					 u32 val, u32 reg)
+{
+	writel(val, kirin_pcie->apb_base + reg);
+}
+
+static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+	return readl(kirin_pcie->apb_base + reg);
+}
+
+/* Registers in PCIePHY */
+static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
+					u32 val, u32 reg)
+{
+	writel(val, kirin_pcie->phy_base + reg);
+}
+
+static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+	return readl(kirin_pcie->phy_base + reg);
+}
+
+static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
+			       struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+	if (IS_ERR(kirin_pcie->phy_ref_clk))
+		return PTR_ERR(kirin_pcie->phy_ref_clk);
+
+	kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
+	if (IS_ERR(kirin_pcie->pcie_aux_clk))
+		return PTR_ERR(kirin_pcie->pcie_aux_clk);
+
+	kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+	if (IS_ERR(kirin_pcie->apb_phy_clk))
+		return PTR_ERR(kirin_pcie->apb_phy_clk);
+
+	kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+	if (IS_ERR(kirin_pcie->apb_sys_clk))
+		return PTR_ERR(kirin_pcie->apb_sys_clk);
+
+	kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
+	if (IS_ERR(kirin_pcie->pcie_aclk))
+		return PTR_ERR(kirin_pcie->pcie_aclk);
+
+	return 0;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+				    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *apb;
+	struct resource *phy;
+	struct resource *dbi;
+
+	apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+	kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
+	if (IS_ERR(kirin_pcie->apb_base))
+		return PTR_ERR(kirin_pcie->apb_base);
+
+	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+	kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
+	if (IS_ERR(kirin_pcie->phy_base))
+		return PTR_ERR(kirin_pcie->phy_base);
+
+	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
+	if (IS_ERR(kirin_pcie->pci->dbi_base))
+		return PTR_ERR(kirin_pcie->pci->dbi_base);
+
+	kirin_pcie->crgctrl =
+		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+	if (IS_ERR(kirin_pcie->crgctrl))
+		return PTR_ERR(kirin_pcie->crgctrl);
+
+	kirin_pcie->sysctrl =
+		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+	if (IS_ERR(kirin_pcie->sysctrl))
+		return PTR_ERR(kirin_pcie->sysctrl);
+
+	return 0;
+}
+
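+/*
+ * Kirin PHY init: take the reference clock off the pad, power the PHY
+ * up, release its reset and verify that the PIPE clock has stabilized.
+ */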
+static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
+{
+	struct device *dev = kirin_pcie->pci->dev;
+	u32 reg_val;
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+	reg_val &= ~PHY_REF_PAD_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
+	reg_val &= ~PHY_PWR_DOWN_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
+	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+	reg_val &= ~PHY_RST_ACK_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+	if (reg_val & PIPE_CLK_STABLE) {
+		dev_err(dev, "PIPE clk is not stable\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
+{
+	u32 val;
+
+	regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+	val |= PCIE_DEBOUNCE_PARAM;
+	val &= ~PCIE_OE_BYPASS;
+	regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+}
+
+static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
+{
+	int ret = 0;
+
+	if (!enable)
+		goto close_clk;
+
+	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
+	if (ret)
+		goto apb_sys_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
+	if (ret)
+		goto apb_phy_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
+	if (ret)
+		goto aclk_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
+	if (ret)
+		goto aux_clk_fail;
+
+	return 0;
+
+close_clk:
+	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
+aux_clk_fail:
+	clk_disable_unprepare(kirin_pcie->pcie_aclk);
+aclk_fail:
+	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
+apb_phy_fail:
+	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
+apb_sys_fail:
+	clk_disable_unprepare(kirin_pcie->phy_ref_clk);
+
+	return ret;
+}
+
+static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
+{
+	int ret;
+
+	/* Power supply for Host */
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
+	usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
+	kirin_pcie_oe_enable(kirin_pcie);
+
+	ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
+	if (ret)
+		return ret;
+
+	/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
+	regmap_write(kirin_pcie->crgctrl,
+		     CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
+
+	ret = kirin_pcie_phy_init(kirin_pcie);
+	if (ret)
+		goto close_clk;
+
+	/* perst assert Endpoint */
+	if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
+		usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+		ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
+		if (ret)
+			goto close_clk;
+		usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+		return 0;
+	}
+
+close_clk:
+	kirin_pcie_clk_ctrl(kirin_pcie, false);
+	return ret;
+}
+
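+/*
+ * DBI accesses on Kirin must be bracketed by enabling the ELBI
+ * sideband "slave DBI" mode; the two helpers below toggle it for the
+ * write (CTRL0) and read (CTRL1) paths respectively.
+ */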
+static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
+					   bool on)
+{
+	u32 val;
+
+	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
+	if (on)
+		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
+					   bool on)
+{
+	u32 val;
+
+	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
+	if (on)
+		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+}
+
+static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+				  int where, int size, u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	int ret;
+
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+				  int where, int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	int ret;
+
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+			       u32 reg, size_t size)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	u32 ret;
+
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+	dw_pcie_read(base + reg, size, &ret);
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				 u32 reg, size_t size, u32 val)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+	dw_pcie_write(base + reg, size, val);
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+}
+
+static int kirin_pcie_link_up(struct dw_pcie *pci)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+
+	if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+		return 1;
+
+	return 0;
+}
+
+static int kirin_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	struct device *dev = kirin_pcie->pci->dev;
+	int count = 0;
+
+	if (kirin_pcie_link_up(pci))
+		return 0;
+
+	dw_pcie_setup_rc(pp);
+
+	/* assert LTSSM enable */
+	kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
+			      PCIE_APP_LTSSM_ENABLE);
+
+	/* check if the link is up or not */
+	while (!kirin_pcie_link_up(pci)) {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		count++;
+		if (count == 1000) {
+			dev_err(dev, "Link Fail\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int kirin_pcie_host_init(struct pcie_port *pp)
+{
+	kirin_pcie_establish_link(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_ops kirin_dw_pcie_ops = {
+	.read_dbi = kirin_pcie_read_dbi,
+	.write_dbi = kirin_pcie_write_dbi,
+	.link_up = kirin_pcie_link_up,
+};
+
+static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
+	.rd_own_conf = kirin_pcie_rd_own_conf,
+	.wr_own_conf = kirin_pcie_wr_own_conf,
+	.host_init = kirin_pcie_host_init,
+};
+
+static int kirin_pcie_add_msi(struct dw_pcie *pci,
+				struct platform_device *pdev)
+{
+	int irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(&pdev->dev,
+				"failed to get MSI IRQ (%d)\n", irq);
+			return irq;
+		}
+
+		pci->pp.msi_irq = irq;
+	}
+
+	return 0;
+}
+
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+			       struct platform_device *pdev)
+{
+	int ret;
+
+	ret = kirin_pcie_add_msi(pci, pdev);
+	if (ret)
+		return ret;
+
+	pci->pp.ops = &kirin_pcie_host_ops;
+
+	return dw_pcie_host_init(&pci->pp);
+}
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct kirin_pcie *kirin_pcie;
+	struct dw_pcie *pci;
+	int ret;
+
+	if (!dev->of_node) {
+		dev_err(dev, "NULL node\n");
+		return -EINVAL;
+	}
+
+	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
+	if (!kirin_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &kirin_dw_pcie_ops;
+	kirin_pcie->pci = pci;
+
+	ret = kirin_pcie_get_clk(kirin_pcie, pdev);
+	if (ret)
+		return ret;
+
+	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
+	if (ret)
+		return ret;
+
+	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
+						      "reset-gpios", 0);
+	if (kirin_pcie->gpio_id_reset < 0)
+		return -ENODEV;
+
+	ret = kirin_pcie_power_on(kirin_pcie);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, kirin_pcie);
+
+	return kirin_add_pcie_port(pci, pdev);
+}
+
+static const struct of_device_id kirin_pcie_match[] = {
+	{ .compatible = "hisilicon,kirin960-pcie" },
+	{},
+};
+
+static struct platform_driver kirin_pcie_driver = {
+	.probe			= kirin_pcie_probe,
+	.driver			= {
+		.name			= "kirin-pcie",
+		.of_match_table = kirin_pcie_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(kirin_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-qcom.c b/marvell/linux/drivers/pci/controller/dwc/pcie-qcom.c
new file mode 100644
index 0000000..055cef6
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-qcom.c
@@ -0,0 +1,1551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_SYS_CTRL			0x00
+#define MST_WAKEUP_EN				BIT(13)
+#define SLV_WAKEUP_EN				BIT(12)
+#define MSTR_ACLK_CGC_DIS			BIT(10)
+#define SLV_ACLK_CGC_DIS			BIT(9)
+#define CORE_CLK_CGC_DIS			BIT(6)
+#define AUX_PWR_DET				BIT(4)
+#define L23_CLK_RMV_DIS				BIT(2)
+#define L1_CLK_RMV_DIS				BIT(1)
+
+#define PCIE20_COMMAND_STATUS			0x04
+#define CMD_BME_VAL				0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10
+
+#define PCIE20_PARF_PHY_CTRL			0x40
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)
+
+#define PCIE20_PARF_PHY_REFCLK			0x4C
+#define PHY_REFCLK_SSP_EN			BIT(16)
+#define PHY_REFCLK_USE_PAD			BIT(12)
+
+#define PCIE20_PARF_DBI_BASE_ADDR		0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
+#define PCIE20_PARF_LTSSM			0x1B0
+#define PCIE20_PARF_SID_OFFSET			0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
+#define PCIE20_PARF_DEVICE_TYPE			0x1000
+
+#define PCIE20_ELBI_SYS_CTRL			0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
+#define CFG_BRIDGE_SB_INIT			BIT(0)
+
+#define PCIE20_CAP				0x70
+#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL			0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH			0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG		0x8BC
+#define DBI_RO_WR_EN				1
+
+#define PERST_DELAY_US				1000
+/* PARF registers */
+#define PCIE20_PARF_PCS_DEEMPH			0x34
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)
+
+#define PCIE20_PARF_PCS_SWING			0x38
+#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
+#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)
+
+#define PCIE20_PARF_CONFIG_BITS		0x50
+#define PHY_RX0_EQ(x)				((x) << 24)
+
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
+#define SLV_ADDR_SPACE_SZ			0x10000000
+
+#define DEVICE_TYPE_RC				0x4
+
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
+struct qcom_pcie_resources_2_1_0 {
+	struct clk *iface_clk;
+	struct clk *core_clk;
+	struct clk *phy_clk;
+	struct clk *aux_clk;
+	struct clk *ref_clk;
+	struct reset_control *pci_reset;
+	struct reset_control *axi_reset;
+	struct reset_control *ahb_reset;
+	struct reset_control *por_reset;
+	struct reset_control *phy_reset;
+	struct reset_control *ext_reset;
+	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_1_0_0 {
+	struct clk *iface;
+	struct clk *aux;
+	struct clk *master_bus;
+	struct clk *slave_bus;
+	struct reset_control *core;
+	struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
+struct qcom_pcie_resources_2_3_2 {
+	struct clk *aux_clk;
+	struct clk *master_clk;
+	struct clk *slave_clk;
+	struct clk *cfg_clk;
+	struct clk *pipe_clk;
+	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
+struct qcom_pcie_resources_2_4_0 {
+	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+	int num_clks;
+	struct reset_control *axi_m_reset;
+	struct reset_control *axi_s_reset;
+	struct reset_control *pipe_reset;
+	struct reset_control *axi_m_vmid_reset;
+	struct reset_control *axi_s_xpu_reset;
+	struct reset_control *parf_reset;
+	struct reset_control *phy_reset;
+	struct reset_control *axi_m_sticky_reset;
+	struct reset_control *pipe_sticky_reset;
+	struct reset_control *pwr_reset;
+	struct reset_control *ahb_reset;
+	struct reset_control *phy_ahb_reset;
+};
+
+struct qcom_pcie_resources_2_3_3 {
+	struct clk *iface;
+	struct clk *axi_m_clk;
+	struct clk *axi_s_clk;
+	struct clk *ahb_clk;
+	struct clk *aux_clk;
+	struct reset_control *rst[7];
+};
+
+struct qcom_pcie_resources_2_7_0 {
+	struct clk_bulk_data clks[6];
+	struct regulator_bulk_data supplies[2];
+	struct reset_control *pci_reset;
+	struct clk *pipe_clk;
+};
+
+union qcom_pcie_resources {
+	struct qcom_pcie_resources_1_0_0 v1_0_0;
+	struct qcom_pcie_resources_2_1_0 v2_1_0;
+	struct qcom_pcie_resources_2_3_2 v2_3_2;
+	struct qcom_pcie_resources_2_3_3 v2_3_3;
+	struct qcom_pcie_resources_2_4_0 v2_4_0;
+	struct qcom_pcie_resources_2_7_0 v2_7_0;
+};
+
+struct qcom_pcie;
+
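+/* Per-IP-version hooks, one implementation per qcom_pcie_resources_* variant. */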
+struct qcom_pcie_ops {
+	int (*get_resources)(struct qcom_pcie *pcie);
+	int (*init)(struct qcom_pcie *pcie);
+	int (*post_init)(struct qcom_pcie *pcie);
+	void (*deinit)(struct qcom_pcie *pcie);
+	void (*post_deinit)(struct qcom_pcie *pcie);
+	void (*ltssm_enable)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie {
+	struct dw_pcie *pci;
+	void __iomem *parf;			/* DT parf */
+	void __iomem *elbi;			/* DT elbi */
+	union qcom_pcie_resources res;
+	struct phy *phy;
+	struct gpio_desc *reset;
+	const struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
+
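+/* Toggle the endpoint PERST# line; deassert first waits out the 100 ms minimum. */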
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+	gpiod_set_value_cansleep(pcie->reset, 1);
+	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+	/* Ensure that PERST has been asserted for at least 100 ms */
+	msleep(100);
+	gpiod_set_value_cansleep(pcie->reset, 0);
+	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+
+	if (dw_pcie_link_up(pci))
+		return 0;
+
+	/* Enable Link Training state machine */
+	if (pcie->ops->ltssm_enable)
+		pcie->ops->ltssm_enable(pcie);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vdda_phy";
+	res->supplies[2].supply = "vdda_refclk";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->iface_clk = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface_clk))
+		return PTR_ERR(res->iface_clk);
+
+	res->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(res->core_clk))
+		return PTR_ERR(res->core_clk);
+
+	res->phy_clk = devm_clk_get(dev, "phy");
+	if (IS_ERR(res->phy_clk))
+		return PTR_ERR(res->phy_clk);
+
+	res->aux_clk = devm_clk_get_optional(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	res->ref_clk = devm_clk_get_optional(dev, "ref");
+	if (IS_ERR(res->ref_clk))
+		return PTR_ERR(res->ref_clk);
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+	if (IS_ERR(res->axi_reset))
+		return PTR_ERR(res->axi_reset);
+
+	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+	if (IS_ERR(res->por_reset))
+		return PTR_ERR(res->por_reset);
+
+	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+	if (IS_ERR(res->ext_reset))
+		return PTR_ERR(res->ext_reset);
+
+	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+	return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+	clk_disable_unprepare(res->phy_clk);
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->ext_reset);
+	reset_control_assert(res->phy_reset);
+	clk_disable_unprepare(res->iface_clk);
+	clk_disable_unprepare(res->core_clk);
+	clk_disable_unprepare(res->aux_clk);
+	clk_disable_unprepare(res->ref_clk);
+
+	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *node = dev->of_node;
+	u32 val;
+	int ret;
+
+	/* reset the PCIe interface, as U-Boot can leave it in an undefined state */
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->ext_reset);
+	reset_control_assert(res->phy_reset);
+
+	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->iface_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->core_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_core;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_clk_aux;
+	}
+
+	ret = clk_prepare_enable(res->ref_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable ref clock\n");
+		goto err_clk_ref;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_deassert_ahb;
+	}
+
+	ret = reset_control_deassert(res->ext_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ext reset\n");
+		goto err_deassert_ahb;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+		writel(PCS_SWING_TX_SWING_FULL(120) |
+			       PCS_SWING_TX_SWING_LOW(120),
+		       pcie->parf + PCIE20_PARF_PCS_SWING);
+		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+	}
+
+	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+		/* set TX termination offset */
+		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+	}
+
+	/* enable external reference clock */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+	/* USE_PAD is required only for ipq806x */
+	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+		val &= ~PHY_REFCLK_USE_PAD;
+	val |= PHY_REFCLK_SSP_EN;
+	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->por_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert por reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->axi_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi reset\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->phy_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable phy clock\n");
+		goto err_deassert_ahb;
+	}
+
+	/* wait for clock acquisition */
+	usleep_range(1000, 1500);
+
+	/* Set the Max TLP size to 2K, instead of using default of 4K */
+	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+	writel(CFG_BRIDGE_SB_INIT,
+	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
+	return 0;
+
+err_deassert_ahb:
+	clk_disable_unprepare(res->ref_clk);
+err_clk_ref:
+	clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+	clk_disable_unprepare(res->core_clk);
+err_clk_core:
+	clk_disable_unprepare(res->iface_clk);
+err_assert_ahb:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+
+	res->vdda = devm_regulator_get(dev, "vdda");
+	if (IS_ERR(res->vdda))
+		return PTR_ERR(res->vdda);
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->aux = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux))
+		return PTR_ERR(res->aux);
+
+	res->master_bus = devm_clk_get(dev, "master_bus");
+	if (IS_ERR(res->master_bus))
+		return PTR_ERR(res->master_bus);
+
+	res->slave_bus = devm_clk_get(dev, "slave_bus");
+	if (IS_ERR(res->slave_bus))
+		return PTR_ERR(res->slave_bus);
+
+	res->core = devm_reset_control_get_exclusive(dev, "core");
+	return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+	reset_control_assert(res->core);
+	clk_disable_unprepare(res->slave_bus);
+	clk_disable_unprepare(res->master_bus);
+	clk_disable_unprepare(res->iface);
+	clk_disable_unprepare(res->aux);
+	regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	ret = reset_control_deassert(res->core);
+	if (ret) {
+		dev_err(dev, "cannot deassert core reset\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->aux);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_res;
+	}
+
+	ret = clk_prepare_enable(res->iface);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_aux;
+	}
+
+	ret = clk_prepare_enable(res->master_bus);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master_bus clock\n");
+		goto err_iface;
+	}
+
+	ret = clk_prepare_enable(res->slave_bus);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+		goto err_master;
+	}
+
+	ret = regulator_enable(res->vdda);
+	if (ret) {
+		dev_err(dev, "cannot enable vdda regulator\n");
+		goto err_slave;
+	}
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_slave:
+	clk_disable_unprepare(res->slave_bus);
+err_master:
+	clk_disable_unprepare(res->master_bus);
+err_iface:
+	clk_disable_unprepare(res->iface);
+err_aux:
+	clk_disable_unprepare(res->aux);
+err_res:
+	reset_control_assert(res->core);
+
+	return ret;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+	val |= BIT(8);
+	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vddpe-3v3";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	res->cfg_clk = devm_clk_get(dev, "cfg");
+	if (IS_ERR(res->cfg_clk))
+		return PTR_ERR(res->cfg_clk);
+
+	res->master_clk = devm_clk_get(dev, "bus_master");
+	if (IS_ERR(res->master_clk))
+		return PTR_ERR(res->master_clk);
+
+	res->slave_clk = devm_clk_get(dev, "bus_slave");
+	if (IS_ERR(res->slave_clk))
+		return PTR_ERR(res->slave_clk);
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+	clk_disable_unprepare(res->slave_clk);
+	clk_disable_unprepare(res->master_clk);
+	clk_disable_unprepare(res->cfg_clk);
+	clk_disable_unprepare(res->aux_clk);
+
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+	clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_aux_clk;
+	}
+
+	ret = clk_prepare_enable(res->cfg_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable cfg clock\n");
+		goto err_cfg_clk;
+	}
+
+	ret = clk_prepare_enable(res->master_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master clock\n");
+		goto err_master_clk;
+	}
+
+	ret = clk_prepare_enable(res->slave_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave clock\n");
+		goto err_slave_clk;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE  */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
+err_slave_clk:
+	clk_disable_unprepare(res->master_clk);
+err_master_clk:
+	clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+	clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	ret = clk_prepare_enable(res->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+	int ret;
+
+	res->clks[0].id = "aux";
+	res->clks[1].id = "master_bus";
+	res->clks[2].id = "slave_bus";
+	res->clks[3].id = "iface";
+
+	/* qcom,pcie-ipq4019 is defined without "iface" */
+	res->num_clks = is_ipq ? 3 : 4;
+
+	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+	if (ret < 0)
+		return ret;
+
+	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
+	if (IS_ERR(res->axi_m_reset))
+		return PTR_ERR(res->axi_m_reset);
+
+	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
+	if (IS_ERR(res->axi_s_reset))
+		return PTR_ERR(res->axi_s_reset);
+
+	if (is_ipq) {
+		/*
+		 * These resources relate to the PHY or are secure clocks, but
+		 * are controlled here for IPQ4019.
+		 */
+		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+		if (IS_ERR(res->pipe_reset))
+			return PTR_ERR(res->pipe_reset);
+
+		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+									 "axi_m_vmid");
+		if (IS_ERR(res->axi_m_vmid_reset))
+			return PTR_ERR(res->axi_m_vmid_reset);
+
+		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+									"axi_s_xpu");
+		if (IS_ERR(res->axi_s_xpu_reset))
+			return PTR_ERR(res->axi_s_xpu_reset);
+
+		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+		if (IS_ERR(res->parf_reset))
+			return PTR_ERR(res->parf_reset);
+
+		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+		if (IS_ERR(res->phy_reset))
+			return PTR_ERR(res->phy_reset);
+	}
+
+	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+								   "axi_m_sticky");
+	if (IS_ERR(res->axi_m_sticky_reset))
+		return PTR_ERR(res->axi_m_sticky_reset);
+
+	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+								  "pipe_sticky");
+	if (IS_ERR(res->pipe_sticky_reset))
+		return PTR_ERR(res->pipe_sticky_reset);
+
+	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
+	if (IS_ERR(res->pwr_reset))
+		return PTR_ERR(res->pwr_reset);
+
+	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	if (is_ipq) {
+		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+		if (IS_ERR(res->phy_ahb_reset))
+			return PTR_ERR(res->phy_ahb_reset);
+	}
+
+	return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+	reset_control_assert(res->axi_m_reset);
+	reset_control_assert(res->axi_s_reset);
+	reset_control_assert(res->pipe_reset);
+	reset_control_assert(res->pipe_sticky_reset);
+	reset_control_assert(res->phy_reset);
+	reset_control_assert(res->phy_ahb_reset);
+	reset_control_assert(res->axi_m_sticky_reset);
+	reset_control_assert(res->pwr_reset);
+	reset_control_assert(res->ahb_reset);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = reset_control_assert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi slave reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert power reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy ahb reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		goto err_rst_phy;
+	}
+
+	ret = reset_control_deassert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe reset\n");
+		goto err_rst_pipe;
+	}
+
+	ret = reset_control_deassert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe sticky reset\n");
+		goto err_rst_pipe_sticky;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master reset\n");
+		goto err_rst_axi_m;
+	}
+
+	ret = reset_control_deassert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master sticky reset\n");
+		goto err_rst_axi_m_sticky;
+	}
+
+	ret = reset_control_deassert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi slave reset\n");
+		goto err_rst_axi_s;
+	}
+
+	ret = reset_control_deassert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert power reset\n");
+		goto err_rst_pwr;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_rst_ahb;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+	if (ret)
+		goto err_clks;
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
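+	/*
+	 * Error unwind: re-assert the resets in the reverse order of the
+	 * deassert sequence above.
+	 */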
+err_clks:
+	reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+	reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+	reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+	reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+	reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+	reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+	reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+	reset_control_assert(res->phy_reset);
+err_rst_phy:
+	reset_control_assert(res->phy_ahb_reset);
+	return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int i;
+	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+				    "axi_m_sticky", "sticky",
+				    "ahb", "sleep", };
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->axi_m_clk = devm_clk_get(dev, "axi_m");
+	if (IS_ERR(res->axi_m_clk))
+		return PTR_ERR(res->axi_m_clk);
+
+	res->axi_s_clk = devm_clk_get(dev, "axi_s");
+	if (IS_ERR(res->axi_s_clk))
+		return PTR_ERR(res->axi_s_clk);
+
+	res->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(res->ahb_clk))
+		return PTR_ERR(res->ahb_clk);
+
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+		if (IS_ERR(res->rst[i]))
+			return PTR_ERR(res->rst[i]);
+	}
+
+	return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+	clk_disable_unprepare(res->iface);
+	clk_disable_unprepare(res->axi_m_clk);
+	clk_disable_unprepare(res->axi_s_clk);
+	clk_disable_unprepare(res->ahb_clk);
+	clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int i, ret;
+	u32 val;
+
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+		ret = reset_control_assert(res->rst[i]);
+		if (ret) {
+			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+			return ret;
+		}
+	}
+
+	usleep_range(2000, 2500);
+
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+		ret = reset_control_deassert(res->rst[i]);
+		if (ret) {
+			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+				ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * There is no way to check whether the reset has completed,
+	 * so wait for some time.
+	 */
+	usleep_range(2000, 2500);
+
+	ret = clk_prepare_enable(res->iface);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_clk_iface;
+	}
+
+	ret = clk_prepare_enable(res->axi_m_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable axi master clock\n");
+		goto err_clk_axi_m;
+	}
+
+	ret = clk_prepare_enable(res->axi_s_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable axi slave clock\n");
+		goto err_clk_axi_s;
+	}
+
+	ret = clk_prepare_enable(res->ahb_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable ahb clock\n");
+		goto err_clk_ahb;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_clk_aux;
+	}
+
+	writel(SLV_ADDR_SPACE_SZ,
+		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+		pcie->parf + PCIE20_PARF_SYS_CTRL);
+	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
+	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+		PCIE20_DEVICE_CONTROL2_STATUS2);
+
+	return 0;
+
+err_clk_aux:
+	clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+	clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+	clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+	clk_disable_unprepare(res->iface);
+err_clk_iface:
+	/*
+	 * Not checking for failure; the original failure is returned
+	 * in 'ret' anyway.
+	 */
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+		reset_control_assert(res->rst[i]);
+
+	return ret;
+}
+
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vddpe-3v3";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->clks[0].id = "aux";
+	res->clks[1].id = "cfg";
+	res->clks[2].id = "bus_master";
+	res->clks[3].id = "bus_slave";
+	res->clks[4].id = "slave_q2a";
+	res->clks[5].id = "tbu";
+
+	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		return ret;
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		goto err_disable_regulators;
+
+	ret = reset_control_assert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot assert pci reset\n");
+		goto err_disable_clocks;
+	}
+
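+	/* hold the core in reset briefly before releasing it */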
+	msleep(10);
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		goto err_assert_resets;
+	}
+
+	ret = clk_prepare_enable(res->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clock\n");
+		goto err_assert_resets;
+	}
+
+	/* configure PCIe to RC mode */
+	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_assert_resets:
+	reset_control_assert(res->pci_reset);
+err_disable_clocks:
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_disable_regulators:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	return clk_prepare_enable(res->pipe_clk);
+}
+
+static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_disable_unprepare(res->pipe_clk);
+}
+
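+/* The link is up once the Data Link Layer Link Active bit is set in LNKSTA */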
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct qcom_pcie *pcie = to_qcom_pcie(pci);
+	int ret;
+
+	qcom_ep_reset_assert(pcie);
+
+	ret = pcie->ops->init(pcie);
+	if (ret)
+		return ret;
+
+	ret = phy_power_on(pcie->phy);
+	if (ret)
+		goto err_deinit;
+
+	if (pcie->ops->post_init) {
+		ret = pcie->ops->post_init(pcie);
+		if (ret)
+			goto err_disable_phy;
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	qcom_ep_reset_deassert(pcie);
+
+	ret = qcom_pcie_establish_link(pcie);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	qcom_ep_reset_assert(pcie);
+	if (pcie->ops->post_deinit)
+		pcie->ops->post_deinit(pcie);
+err_disable_phy:
+	phy_power_off(pcie->phy);
+err_deinit:
+	pcie->ops->deinit(pcie);
+
+	return ret;
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+	.host_init = qcom_pcie_host_init,
+};
+
+/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+	.get_resources = qcom_pcie_get_resources_2_1_0,
+	.init = qcom_pcie_init_2_1_0,
+	.deinit = qcom_pcie_deinit_2_1_0,
+	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+	.get_resources = qcom_pcie_get_resources_1_0_0,
+	.init = qcom_pcie_init_1_0_0,
+	.deinit = qcom_pcie_deinit_1_0_0,
+	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+	.get_resources = qcom_pcie_get_resources_2_3_2,
+	.init = qcom_pcie_init_2_3_2,
+	.post_init = qcom_pcie_post_init_2_3_2,
+	.deinit = qcom_pcie_deinit_2_3_2,
+	.post_deinit = qcom_pcie_post_deinit_2_3_2,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+	.get_resources = qcom_pcie_get_resources_2_4_0,
+	.init = qcom_pcie_init_2_4_0,
+	.deinit = qcom_pcie_deinit_2_4_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+	.get_resources = qcom_pcie_get_resources_2_3_3,
+	.init = qcom_pcie_init_2_3_3,
+	.deinit = qcom_pcie_deinit_2_3_3,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+	.get_resources = qcom_pcie_get_resources_2_7_0,
+	.init = qcom_pcie_init_2_7_0,
+	.deinit = qcom_pcie_deinit_2_7_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+	.post_init = qcom_pcie_post_init_2_7_0,
+	.post_deinit = qcom_pcie_post_deinit_2_7_0,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = qcom_pcie_link_up,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct pcie_port *pp;
+	struct dw_pcie *pci;
+	struct qcom_pcie *pcie;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		pm_runtime_disable(dev);
+		return ret;
+	}
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+	pp = &pci->pp;
+
+	pcie->pci = pci;
+
+	pcie->ops = of_device_get_match_data(dev);
+
+	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+	if (IS_ERR(pcie->reset)) {
+		ret = PTR_ERR(pcie->reset);
+		goto err_pm_runtime_put;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+	pcie->parf = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->parf)) {
+		ret = PTR_ERR(pcie->parf);
+		goto err_pm_runtime_put;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pci->dbi_base)) {
+		ret = PTR_ERR(pci->dbi_base);
+		goto err_pm_runtime_put;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+	pcie->elbi = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->elbi)) {
+		ret = PTR_ERR(pcie->elbi);
+		goto err_pm_runtime_put;
+	}
+
+	pcie->phy = devm_phy_optional_get(dev, "pciephy");
+	if (IS_ERR(pcie->phy)) {
+		ret = PTR_ERR(pcie->phy);
+		goto err_pm_runtime_put;
+	}
+
+	ret = pcie->ops->get_resources(pcie);
+	if (ret)
+		goto err_pm_runtime_put;
+
+	pp->ops = &qcom_pcie_dw_ops;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0) {
+			ret = pp->msi_irq;
+			goto err_pm_runtime_put;
+		}
+	}
+
+	ret = phy_init(pcie->phy);
+	if (ret)
+		goto err_pm_runtime_put;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "cannot initialize host\n");
+		goto err_phy_exit;
+	}
+
+	return 0;
+
+err_phy_exit:
+	phy_exit(pcie->phy);
+err_pm_runtime_put:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
+	{ }
+};
+
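+/*
+ * The Qcom root ports advertise an incorrect device class, so force the
+ * PCI-to-PCI bridge class to let the PCI core enumerate behind them.
+ */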
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+
+static struct platform_driver qcom_pcie_driver = {
+	.probe = qcom_pcie_probe,
+	.driver = {
+		.name = "qcom-pcie",
+		.suppress_bind_attrs = true,
+		.of_match_table = qcom_pcie_match,
+	},
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-spear13xx.c b/marvell/linux/drivers/pci/controller/dwc/pcie-spear13xx.c
new file mode 100644
index 0000000..7d0cdfd
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@gmail.com>
+ * Mohit Kumar <mohit.kumar.dhaka@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+	struct dw_pcie		*pci;
+	void __iomem		*app_base;
+	struct phy		*phy;
+	struct clk		*clk;
+	bool			is_gen1;
+};
+
+struct pcie_app_reg {
+	u32	app_ctrl_0;		/* cr0 */
+	u32	app_ctrl_1;		/* cr1 */
+	u32	app_status_0;		/* cr2 */
+	u32	app_status_1;		/* cr3 */
+	u32	msg_status;		/* cr4 */
+	u32	msg_payload;		/* cr5 */
+	u32	int_sts;		/* cr6 */
+	u32	int_clr;		/* cr7 */
+	u32	int_mask;		/* cr8 */
+	u32	mst_bmisc;		/* cr9 */
+	u32	phy_ctrl;		/* cr10 */
+	u32	phy_status;		/* cr11 */
+	u32	cxpl_debug_info_0;	/* cr12 */
+	u32	cxpl_debug_info_1;	/* cr13 */
+	u32	ven_msg_ctrl_0;		/* cr14 */
+	u32	ven_msg_ctrl_1;		/* cr15 */
+	u32	ven_msg_data_0;		/* cr16 */
+	u32	ven_msg_data_1;		/* cr17 */
+	u32	ven_msi_0;		/* cr18 */
+	u32	ven_msi_1;		/* cr19 */
+	u32	mst_rmisc;		/* cr20 */
+};
+
+/* CR0 ID */
+#define APP_LTSSM_ENABLE_ID			3
+#define DEVICE_TYPE_RC				(4 << 25)
+#define MISCTRL_EN_ID				30
+#define REG_TRANSLATION_ENABLE			31
+
+/* CR3 ID */
+#define XMLH_LINK_UP				(1 << 6)
+
+/* CR6 */
+#define MSI_CTRL_INT				(1 << 26)
+
+#define EXP_CAP_ID_OFFSET			0x70
+
+#define to_spear13xx_pcie(x)	dev_get_drvdata((x)->dev)
+
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	u32 val;
+	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(pci->dev, "link already up\n");
+		return 0;
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	/*
+	 * This controller supports only a 128-byte read request size, but
+	 * the default value in the capability register is 512 bytes, so
+	 * force it to 128 here.
+	 */
+	dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
+	val &= ~PCI_EXP_DEVCTL_READRQ;
+	dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
+
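+	/* program the ST Microelectronics vendor and device IDs */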
+	dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
+	dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
+
+	/*
+	 * If is_gen1 is set, limit the link to Gen1 speed so that some
+	 * buggy cards also work.
+	 */
+	if (spear13xx_pcie->is_gen1) {
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+			     4, &val);
+		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCAP, 4, val);
+		}
+
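+		/*
+		 * PCI_EXP_LNKCAP_SLS shares the bit layout of the target link
+		 * speed field in LNKCTL2, so the same mask is reused for the
+		 * LNKCTL2 access below.
+		 */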
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+			     2, &val);
+		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCTL2, 2, val);
+		}
+	}
+
+	/* enable ltssm */
+	writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+			| (1 << APP_LTSSM_ENABLE_ID)
+			| ((u32)1 << REG_TRANSLATION_ENABLE),
+			&app_reg->app_ctrl_0);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+	struct spear13xx_pcie *spear13xx_pcie = arg;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	unsigned int status;
+
+	status = readl(&app_reg->int_sts);
+
+	if (status & MSI_CTRL_INT) {
+		BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
+		dw_handle_msi_irq(pp);
+	}
+
+	writel(status, &app_reg->int_clr);
+
+	return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+	/* Enable MSI interrupt */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		dw_pcie_msi_init(pp);
+		writel(readl(&app_reg->int_mask) |
+				MSI_CTRL_INT, &app_reg->int_mask);
+	}
+}
+
+static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+{
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+	if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+		return 1;
+
+	return 0;
+}
+
+static int spear13xx_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+
+	spear13xx_pcie_establish_link(spear13xx_pcie);
+	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+	.host_init = spear13xx_pcie_host_init,
+};
+
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+				   struct platform_device *pdev)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return pp->irq;
+	}
+	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       "spear1340-pcie", spear13xx_pcie);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+
+	pp->ops = &spear13xx_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = spear13xx_pcie_link_up,
+};
+
+static int spear13xx_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct spear13xx_pcie *spear13xx_pcie;
+	struct device_node *np = dev->of_node;
+	struct resource *dbi_base;
+	int ret;
+
+	spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+	if (!spear13xx_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	spear13xx_pcie->pci = pci;
+
+	spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+	if (IS_ERR(spear13xx_pcie->phy)) {
+		ret = PTR_ERR(spear13xx_pcie->phy);
+		if (ret == -EPROBE_DEFER)
+			dev_info(dev, "probe deferred\n");
+		else
+			dev_err(dev, "couldn't get pcie-phy\n");
+		return ret;
+	}
+
+	phy_init(spear13xx_pcie->phy);
+
+	spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(spear13xx_pcie->clk)) {
+		dev_err(dev, "couldn't get clk for pcie\n");
+		return PTR_ERR(spear13xx_pcie->clk);
+	}
+	ret = clk_prepare_enable(spear13xx_pcie->clk);
+	if (ret) {
+		dev_err(dev, "couldn't enable clk for pcie\n");
+		return ret;
+	}
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
+		ret = PTR_ERR(pci->dbi_base);
+		goto fail_clk;
+	}
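+	/* the application (glue) registers follow the DBI space at offset 0x2000 */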
+	spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+	if (of_property_read_bool(np, "st,pcie-is-gen1"))
+		spear13xx_pcie->is_gen1 = true;
+
+	platform_set_drvdata(pdev, spear13xx_pcie);
+
+	ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
+	if (ret < 0)
+		goto fail_clk;
+
+	return 0;
+
+fail_clk:
+	clk_disable_unprepare(spear13xx_pcie->clk);
+
+	return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+	{ .compatible = "st,spear1340-pcie", },
+	{},
+};
+
+static struct platform_driver spear13xx_pcie_driver = {
+	.probe		= spear13xx_pcie_probe,
+	.driver = {
+		.name	= "spear-pcie",
+		.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+		.suppress_bind_attrs = true,
+	},
+};
+
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-tegra194.c b/marvell/linux/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index 0000000..1cf9485
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,1728 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Tegra194 SoC
+ *
+ * Copyright (C) 2019 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include "../../pci.h"
+
+#define APPL_PINMUX				0x0
+#define APPL_PINMUX_PEX_RST			BIT(0)
+#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
+#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)
+#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN		BIT(9)
+#define APPL_PINMUX_CLKREQ_OUT_OVRD		BIT(10)
+
+#define APPL_CTRL				0x4
+#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
+#define APPL_CTRL_LTSSM_EN			BIT(7)
+#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
+#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
+#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
+
+#define APPL_INTR_EN_L0_0			0x8
+#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
+#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
+#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
+#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
+#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
+#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)
+
+#define APPL_INTR_STATUS_L0			0xC
+#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
+#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
+#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)
+
+#define APPL_INTR_EN_L1_0_0				0x1C
+#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
+
+#define APPL_INTR_STATUS_L1_0_0				0x20
+#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
+
+#define APPL_INTR_STATUS_L1_1			0x2C
+#define APPL_INTR_STATUS_L1_2			0x30
+#define APPL_INTR_STATUS_L1_3			0x34
+#define APPL_INTR_STATUS_L1_6			0x3C
+#define APPL_INTR_STATUS_L1_7			0x40
+
+#define APPL_INTR_EN_L1_8_0			0x44
+#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
+#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
+#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
+#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)
+
+#define APPL_INTR_STATUS_L1_8_0			0x4C
+#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
+#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
+#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)
+
+#define APPL_INTR_STATUS_L1_9			0x54
+#define APPL_INTR_STATUS_L1_10			0x58
+#define APPL_INTR_STATUS_L1_11			0x64
+#define APPL_INTR_STATUS_L1_13			0x74
+#define APPL_INTR_STATUS_L1_14			0x78
+#define APPL_INTR_STATUS_L1_15			0x7C
+#define APPL_INTR_STATUS_L1_17			0x88
+
+#define APPL_INTR_EN_L1_18				0x90
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
+
+#define APPL_INTR_STATUS_L1_18				0x94
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
+
+#define APPL_MSI_CTRL_2				0xB0
+
+#define APPL_LTR_MSG_1				0xC4
+#define LTR_MSG_REQ				BIT(15)
+#define LTR_MST_NO_SNOOP_SHIFT			16
+
+#define APPL_LTR_MSG_2				0xC8
+#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)
+
+#define APPL_LINK_STATUS			0xCC
+#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)
+
+#define APPL_DEBUG				0xD0
+#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
+#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
+#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
+#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
+#define LTSSM_STATE_PRE_DETECT			5
+
+#define APPL_RADM_STATUS			0xE4
+#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)
+
+#define APPL_DM_TYPE				0x100
+#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
+#define APPL_DM_TYPE_RP				0x4
+#define APPL_DM_TYPE_EP				0x0
+
+#define APPL_CFG_BASE_ADDR			0x104
+#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)
+
+#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
+#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)
+
+#define APPL_CFG_MISC				0x110
+#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
+#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
+#define APPL_CFG_MISC_ARCACHE_SHIFT		10
+#define APPL_CFG_MISC_ARCACHE_VAL		3
+
+#define APPL_CFG_SLCG_OVERRIDE			0x114
+#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)
+
+#define APPL_CAR_RESET_OVRD				0x12C
+#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)
+
+#define IO_BASE_IO_DECODE				BIT(0)
+#define IO_BASE_IO_DECODE_BIT8				BIT(8)
+
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)
+
+#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
+#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)
+
+#define EVENT_COUNTER_ALL_CLEAR		0x3
+#define EVENT_COUNTER_ENABLE_ALL	0x7
+#define EVENT_COUNTER_ENABLE_SHIFT	2
+#define EVENT_COUNTER_EVENT_SEL_MASK	GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT	16
+#define EVENT_COUNTER_EVENT_Tx_L0S	0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S	0x3
+#define EVENT_COUNTER_EVENT_L1		0x5
+#define EVENT_COUNTER_EVENT_L1_1	0x7
+#define EVENT_COUNTER_EVENT_L1_2	0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT	24
+#define EVENT_COUNTER_GROUP_5		0x5
+
+#define PORT_LOGIC_ACK_F_ASPM_CTRL			0x70C
+#define ENTER_ASPM					BIT(30)
+#define L0S_ENTRANCE_LAT_SHIFT				24
+#define L0S_ENTRANCE_LAT_MASK				GENMASK(26, 24)
+#define L1_ENTRANCE_LAT_SHIFT				27
+#define L1_ENTRANCE_LAT_MASK				GENMASK(29, 27)
+#define N_FTS_SHIFT					8
+#define N_FTS_MASK					GENMASK(7, 0)
+#define N_FTS_VAL					52
+
+#define PORT_LOGIC_GEN2_CTRL				0x80C
+#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE	BIT(17)
+#define FTS_MASK					GENMASK(7, 0)
+#define FTS_VAL						52
+
+#define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828
+
+#define GEN3_EQ_CONTROL_OFF			0x8a8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)
+
+#define GEN3_RELATED_OFF			0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)
+
+#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
+#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
+#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2
+
+#define PORT_LOGIC_MSIX_DOORBELL			0x948
+
+#define CAP_SPCIE_CAP_OFF			0x154
+#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8
+
+#define PME_ACK_TIMEOUT 10000
+
+#define LTSSM_TIMEOUT 50000	/* 50ms */
+
+#define GEN3_GEN4_EQ_PRESET_INIT	5
+
+#define GEN1_CORE_CLK_FREQ	62500000
+#define GEN2_CORE_CLK_FREQ	125000000
+#define GEN3_CORE_CLK_FREQ	250000000
+#define GEN4_CORE_CLK_FREQ	500000000
+
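+/* core clock frequency for each PCIe generation, indexed by (link speed - 1) */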
+static const unsigned int pcie_gen_freq[] = {
+	GEN1_CORE_CLK_FREQ,
+	GEN2_CORE_CLK_FREQ,
+	GEN3_CORE_CLK_FREQ,
+	GEN4_CORE_CLK_FREQ
+};
+
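+/* per-controller (cid-indexed) offsets of the event counter registers */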
+static const u32 event_cntr_ctrl_offset[] = {
+	0x1d8,
+	0x1a8,
+	0x1a8,
+	0x1a8,
+	0x1c4,
+	0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+	0x1dc,
+	0x1ac,
+	0x1ac,
+	0x1ac,
+	0x1c8,
+	0x1dc
+};
+
+struct tegra_pcie_dw {
+	struct device *dev;
+	struct resource *appl_res;
+	struct resource *dbi_res;
+	struct resource *atu_dma_res;
+	void __iomem *appl_base;
+	struct clk *core_clk;
+	struct reset_control *core_apb_rst;
+	struct reset_control *core_rst;
+	struct dw_pcie pci;
+	struct tegra_bpmp *bpmp;
+
+	bool supports_clkreq;
+	bool enable_cdm_check;
+	bool link_state;
+	bool update_fc_fixup;
+	u8 init_link_width;
+	u32 msi_ctrl_int;
+	u32 num_lanes;
+	u32 max_speed;
+	u32 cid;
+	u32 cfg_link_cap_l1sub;
+	u32 pcie_cap_base;
+	u32 aspm_cmrt;
+	u32 aspm_pwr_on_t;
+	u32 aspm_l0s_enter_lat;
+
+	struct regulator *pex_ctl_supply;
+	struct regulator *slot_ctl_3v3;
+	struct regulator *slot_ctl_12v;
+
+	unsigned int phy_count;
+	struct phy **phys;
+
+	struct dentry *debugfs;
+};
+
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+{
+	return container_of(pci, struct tegra_pcie_dw, pci);
+}
+
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+			       const u32 reg)
+{
+	writel_relaxed(value, pcie->appl_base + reg);
+}
+
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+{
+	return readl_relaxed(pcie->appl_base + reg);
+}
+
+struct tegra_pcie_soc {
+	enum dw_pcie_device_mode mode;
+};
+
+static void apply_bad_link_workaround(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 current_link_width;
+	u16 val;
+
+	/*
+	 * NOTE: Since this scenario is uncommon and the link as such is not
+	 * stable anyway, do not wait to confirm whether the link is really
+	 * transitioning to Gen-2 speed.
+	 */
+	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+	if (val & PCI_EXP_LNKSTA_LBMS) {
+		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+		if (pcie->init_link_width > current_link_width) {
+			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						PCI_EXP_LNKCTL2);
+			val &= ~PCI_EXP_LNKCTL2_TLS;
+			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+					   PCI_EXP_LNKCTL2, val);
+
+			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						PCI_EXP_LNKCTL);
+			val |= PCI_EXP_LNKCTL_RL;
+			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+					   PCI_EXP_LNKCTL, val);
+		}
+	}
+}
+
+static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	u32 val, status_l0, status_l1;
+	u16 val_w;
+
+	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+		if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
+			/* SBR & Surprise Link Down WAR */
+			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+			udelay(1);
+			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+
+			val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+			val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
+			dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+		}
+	}
+
+	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+			appl_writel(pcie,
+				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
+				    APPL_INTR_STATUS_L1_8_0);
+			apply_bad_link_workaround(pp);
+		}
+		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+			appl_writel(pcie,
+				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
+				    APPL_INTR_STATUS_L1_8_0);
+
+			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						  PCI_EXP_LNKSTA);
+			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
+				PCI_EXP_LNKSTA_CLS);
+		}
+	}
+
+	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+			dev_info(pci->dev, "CDM check complete\n");
+			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+		}
+		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+			dev_err(pci->dev, "CDM comparison mismatch\n");
+			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+		}
+		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+			dev_err(pci->dev, "CDM Logic error\n");
+			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+		}
+		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+{
+	struct tegra_pcie_dw *pcie = arg;
+
+	return tegra_pcie_rp_irq_handler(pcie);
+}
+
+static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
+				     u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/*
+	 * This is an endpoint-mode-specific register that happens to appear
+	 * even when the controller is operating in root port mode, and the
+	 * system hangs when it is accessed while the link is in the ASPM-L1
+	 * state. So skip accessing it altogether.
+	 */
+	if (where == PORT_LOGIC_MSIX_DOORBELL) {
+		*val = 0x00000000;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
+				     u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/*
+	 * This is an endpoint-mode-specific register that happens to appear
+	 * even when the controller is operating in root port mode, and the
+	 * system hangs when it is accessed while the link is in the ASPM-L1
+	 * state. So skip accessing it altogether.
+	 */
+	if (where == PORT_LOGIC_MSIX_DOORBELL)
+		return PCIBIOS_SUCCESSFUL;
+
+	return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+#if defined(CONFIG_PCIEASPM)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
+	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
+	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
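+/*
+ * Select the given event in counter group 5, enable counting, and return
+ * the current count from the data register.
+ */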
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
+	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
+	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
+	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
+
+	return val;
+}
+
+static int aspm_state_cnt(struct seq_file *s, void *data)
+{
+	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
+				     dev_get_drvdata(s->private);
+	u32 val;
+
+	seq_printf(s, "Tx L0s entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
+
+	seq_printf(s, "Rx L0s entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
+
+	seq_printf(s, "Link L1 entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
+
+	seq_printf(s, "Link L1.1 entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
+
+	seq_printf(s, "Link L1.2 entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
+
+	/* Clear all counters */
+	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
+			   EVENT_COUNTER_ALL_CLEAR);
+
+	/* Re-enable counting */
+	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+
+	return 0;
+}
+
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	u32 val;
+
+	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+
+	/* Enable ASPM counters */
+	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
+
+	/* Program T_cmrt and T_pwr_on values */
+	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
+	val |= (pcie->aspm_cmrt << 8);
+	val |= (pcie->aspm_pwr_on_t << 19);
+	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+
+	/* Program L0s and L1 entrance latencies */
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+	val &= ~L0S_ENTRANCE_LAT_MASK;
+	val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
+	val |= ENTER_ASPM;
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+}
+
+static int init_debugfs(struct tegra_pcie_dw *pcie)
+{
+	struct dentry *d;
+
+	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
+					pcie->debugfs, aspm_state_cnt);
+	if (IS_ERR_OR_NULL(d))
+		dev_err(pcie->dev,
+			"Failed to create debugfs file \"aspm_state_cnt\"\n");
+
+	return 0;
+}
+#else
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
+#endif
+
+static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+	u16 val_w;
+
+	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+	if (pcie->enable_cdm_check) {
+		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
+		appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
+		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
+		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
+		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
+	}
+
+	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+				  PCI_EXP_LNKSTA);
+	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+				  PCI_EXP_LNKCTL);
+	val_w |= PCI_EXP_LNKCTL_LBMIE;
+	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
+			   val_w);
+}
+
+static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+
+	/* Enable legacy interrupt generation */
+	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
+	val |= APPL_INTR_EN_L1_8_INTX_EN;
+	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
+	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
+	if (IS_ENABLED(CONFIG_PCIEAER))
+		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
+}
+
+static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+
+	dw_pcie_msi_init(pp);
+
+	/* Enable MSI interrupt generation */
+	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
+	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+}
+
+static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+	/* Clear interrupt statuses before enabling interrupts */
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+	tegra_pcie_enable_system_interrupts(pp);
+	tegra_pcie_enable_legacy_interrupts(pp);
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		tegra_pcie_enable_msi_interrupts(pp);
+}
+
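+/*
+ * Program the per-lane 8 GT/s (SPCIE capability) and 16 GT/s (Physical
+ * Layer 16 GT/s capability) transmitter presets, then set up the preset
+ * request vectors used during link equalization.
+ */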
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	u32 val, offset, i;
+
+	/* Program init preset */
+	for (i = 0; i < pcie->num_lanes; i++) {
+		dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
+				 + (i * 2), 2, &val);
+		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
+		val |= GEN3_GEN4_EQ_PRESET_INIT;
+		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
+		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
+		dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
+				 + (i * 2), 2, val);
+
+		offset = dw_pcie_find_ext_capability(pci,
+						     PCI_EXT_CAP_ID_PL_16GT) +
+				PCI_PL_16GT_LE_CTRL;
+		dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
+		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
+		val |= GEN3_GEN4_EQ_PRESET_INIT;
+		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
+		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
+		dw_pcie_write(pci->dbi_base + offset + i, 1, val);
+	}
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+}
+
+static void tegra_pcie_prepare_host(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+
+	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
+	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
+	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
+	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
+
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+	/* Configure FTS */
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+	val &= ~(N_FTS_MASK << N_FTS_SHIFT);
+	val |= N_FTS_VAL << N_FTS_SHIFT;
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+	val &= ~FTS_MASK;
+	val |= FTS_VAL;
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+
+	/* Enable the 0xFFFF0001 response for CRS */
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
+	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
+	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
+		AMBA_ERROR_RESPONSE_CRS_SHIFT);
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
+
+	/* Configure Max Speed from DT */
+	if (pcie->max_speed && pcie->max_speed != -EINVAL) {
+		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
+					PCI_EXP_LNKCAP);
+		val &= ~PCI_EXP_LNKCAP_SLS;
+		val |= pcie->max_speed;
+		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
+				   val);
+	}
+
+	/* Configure Max lane width from DT */
+	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+	val &= ~PCI_EXP_LNKCAP_MLW;
+	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+	config_gen3_gen4_eq_presets(pcie);
+
+	init_host_aspm(pcie);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+	if (pcie->update_fc_fixup) {
+		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+	/* Assert RST */
+	val = appl_readl(pcie, APPL_PINMUX);
+	val &= ~APPL_PINMUX_PEX_RST;
+	appl_writel(pcie, val, APPL_PINMUX);
+
+	usleep_range(100, 200);
+
+	/* Enable LTSSM */
+	val = appl_readl(pcie, APPL_CTRL);
+	val |= APPL_CTRL_LTSSM_EN;
+	appl_writel(pcie, val, APPL_CTRL);
+
+	/* De-assert RST */
+	val = appl_readl(pcie, APPL_PINMUX);
+	val |= APPL_PINMUX_PEX_RST;
+	appl_writel(pcie, val, APPL_PINMUX);
+
+	msleep(100);
+}
+
+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val, tmp, offset, speed;
+
+	tegra_pcie_prepare_host(pp);
+
+	if (dw_pcie_wait_for_link(pci)) {
+		/*
+		 * Some endpoints cannot bring the link up if the root port
+		 * has the Data Link Feature (DLF) enabled.
+		 * Refer to PCIe spec rev 4.0 ver 1.0, sec 3.4.2 & 7.7.4 for
+		 * more info on Scaled Flow Control and DLF.
+		 * So confirm that this is indeed the case here and attempt
+		 * link-up once again with DLF disabled.
+		 */
+		val = appl_readl(pcie, APPL_DEBUG);
+		val &= APPL_DEBUG_LTSSM_STATE_MASK;
+		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
+		tmp = appl_readl(pcie, APPL_LINK_STATUS);
+		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
+		if (!(val == 0x11 && !tmp)) {
+			/* Link is down for a genuine reason; nothing to retry */
+			return 0;
+		}
+
+		dev_info(pci->dev, "Link is down in DLL\n");
+		dev_info(pci->dev, "Trying again with DLFE disabled\n");
+		/* Disable LTSSM */
+		val = appl_readl(pcie, APPL_CTRL);
+		val &= ~APPL_CTRL_LTSSM_EN;
+		appl_writel(pcie, val, APPL_CTRL);
+
+		reset_control_assert(pcie->core_rst);
+		reset_control_deassert(pcie->core_rst);
+
+		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
+		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
+		val &= ~PCI_DLF_EXCHANGE_ENABLE;
+		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
+
+		tegra_pcie_prepare_host(pp);
+
+		if (dw_pcie_wait_for_link(pci))
+			return 0;
+	}
+
+	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+		PCI_EXP_LNKSTA_CLS;
+	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+
+	tegra_pcie_enable_interrupts(pp);
+
+	return 0;
+}
+
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+{
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
+{
+	pp->num_vectors = MAX_MSI_IRQS;
+}
+
+static const struct dw_pcie_ops tegra_dw_pcie_ops = {
+	.link_up = tegra_pcie_dw_link_up,
+};
+
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+	.rd_own_conf = tegra_pcie_dw_rd_own_conf,
+	.wr_own_conf = tegra_pcie_dw_wr_own_conf,
+	.host_init = tegra_pcie_dw_host_init,
+	.set_num_vectors = tegra_pcie_set_msi_vec_num,
+};
+
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+{
+	unsigned int phy_count = pcie->phy_count;
+
+	while (phy_count--) {
+		phy_power_off(pcie->phys[phy_count]);
+		phy_exit(pcie->phys[phy_count]);
+	}
+}
+
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < pcie->phy_count; i++) {
+		ret = phy_init(pcie->phys[i]);
+		if (ret < 0)
+			goto phy_power_off;
+
+		ret = phy_power_on(pcie->phys[i]);
+		if (ret < 0)
+			goto phy_exit;
+	}
+
+	return 0;
+
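+	/*
+	 * Error unwind: a phy_power_on() failure jumps into the middle of the
+	 * loop so the failed PHY is exited first, then all earlier PHYs are
+	 * both powered off and exited.
+	 */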
+phy_power_off:
+	while (i--) {
+		phy_power_off(pcie->phys[i]);
+phy_exit:
+		phy_exit(pcie->phys[i]);
+	}
+
+	return ret;
+}
+
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+{
+	struct device_node *np = pcie->dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
+	if (ret < 0) {
+		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
+				   &pcie->aspm_pwr_on_t);
+	if (ret < 0)
+		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
+			 ret);
+
+	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
+				   &pcie->aspm_l0s_enter_lat);
+	if (ret < 0)
+		dev_info(pcie->dev,
+			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
+
+	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
+		return ret;
+	}
+
+	pcie->max_speed = of_pci_get_max_link_speed(np);
+
+	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_count_strings(np, "phy-names");
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
+			ret);
+		return ret;
+	}
+	pcie->phy_count = ret;
+
+	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
+		pcie->update_fc_fixup = true;
+
+	pcie->supports_clkreq = of_property_read_bool(np, "supports-clkreq");
+
+	pcie->enable_cdm_check =
+		of_property_read_bool(np, "snps,enable-cdm-check");
+
+	return 0;
+}
+
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+					  bool enable)
+{
+	struct mrq_uphy_response resp;
+	struct tegra_bpmp_message msg;
+	struct mrq_uphy_request req;
+
+	/* Controller-5 doesn't need to have its state set by BPMP-FW */
+	if (pcie->cid == 5)
+		return 0;
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+	req.controller_state.pcie_controller = pcie->cid;
+	req.controller_state.enable = enable;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_UPHY;
+	msg.tx.data = &req;
+	msg.tx.size = sizeof(req);
+	msg.rx.data = &resp;
+	msg.rx.size = sizeof(resp);
+
+	return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+{
+	struct pcie_port *pp = &pcie->pci.pp;
+	struct pci_bus *child, *root_bus = NULL;
+	struct pci_dev *pdev;
+
+	/*
+	 * The link doesn't go into the L2 state with some endpoints if they
+	 * are not in the D0 state. So make sure that the immediate downstream
+	 * devices are in D0 before sending PME_TurnOff to put the link into
+	 * L2.
+	 * This is as per PCI Express Base r4.0 v1.0, September 27 2017,
+	 * sec 5.2 "Link State Power Management" (page 428).
+	 */
+
+	/* Find the bus immediately downstream of the root port */
+	list_for_each_entry(child, &pp->root_bus->children, node) {
+		if (child->parent == pp->root_bus) {
+			root_bus = child;
+			break;
+		}
+	}
+
+	if (!root_bus) {
+		dev_err(pcie->dev, "Failed to find downstream devices\n");
+		return;
+	}
+
+	/* Bring downstream devices to D0 if they are not already in it */
+	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+		if (PCI_SLOT(pdev->devfn) == 0) {
+			if (pci_set_power_state(pdev, PCI_D0))
+				dev_err(pcie->dev,
+					"Failed to transition %s to D0 state\n",
+					dev_name(&pdev->dev));
+		}
+	}
+}
+
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
+	if (IS_ERR(pcie->slot_ctl_3v3)) {
+		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
+			return PTR_ERR(pcie->slot_ctl_3v3);
+
+		pcie->slot_ctl_3v3 = NULL;
+	}
+
+	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
+	if (IS_ERR(pcie->slot_ctl_12v)) {
+		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
+			return PTR_ERR(pcie->slot_ctl_12v);
+
+		pcie->slot_ctl_12v = NULL;
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+	int ret;
+
+	if (pcie->slot_ctl_3v3) {
+		ret = regulator_enable(pcie->slot_ctl_3v3);
+		if (ret < 0) {
+			dev_err(pcie->dev,
+				"Failed to enable 3.3V slot supply: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (pcie->slot_ctl_12v) {
+		ret = regulator_enable(pcie->slot_ctl_12v);
+		if (ret < 0) {
+			dev_err(pcie->dev,
+				"Failed to enable 12V slot supply: %d\n", ret);
+			goto fail_12v_enable;
+		}
+	}
+
+	/*
+	 * According to PCI Express Card Electromechanical Specification
+	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
+	 * should be a minimum of 100ms.
+	 */
+	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
+		msleep(100);
+
+	return 0;
+
+fail_12v_enable:
+	if (pcie->slot_ctl_3v3)
+		regulator_disable(pcie->slot_ctl_3v3);
+	return ret;
+}
+
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+	if (pcie->slot_ctl_12v)
+		regulator_disable(pcie->slot_ctl_12v);
+	if (pcie->slot_ctl_3v3)
+		regulator_disable(pcie->slot_ctl_3v3);
+}
+
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+					bool en_hw_hot_rst)
+{
+	int ret;
+	u32 val;
+
+	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+	if (ret) {
+		dev_err(pcie->dev,
+			"Failed to enable controller %u: %d\n", pcie->cid, ret);
+		return ret;
+	}
+
+	ret = tegra_pcie_enable_slot_regulators(pcie);
+	if (ret < 0)
+		goto fail_slot_reg_en;
+
+	ret = regulator_enable(pcie->pex_ctl_supply);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
+		goto fail_reg_en;
+	}
+
+	ret = clk_prepare_enable(pcie->core_clk);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
+		goto fail_core_clk;
+	}
+
+	ret = reset_control_deassert(pcie->core_apb_rst);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
+			ret);
+		goto fail_core_apb_rst;
+	}
+
+	if (en_hw_hot_rst) {
+		/* Enable HW_HOT_RST mode */
+		val = appl_readl(pcie, APPL_CTRL);
+		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+		val |= APPL_CTRL_HW_HOT_RST_EN;
+		appl_writel(pcie, val, APPL_CTRL);
+	}
+
+	ret = tegra_pcie_enable_phy(pcie);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
+		goto fail_phy;
+	}
+
+	/* Update CFG base address */
+	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+		    APPL_CFG_BASE_ADDR);
+
+	/* Configure this core for RP mode operation */
+	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
+
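+	/* Clear the SLCG (second-level clock gating) override */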
+	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+	val = appl_readl(pcie, APPL_CTRL);
+	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
+
+	val = appl_readl(pcie, APPL_CFG_MISC);
+	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+	appl_writel(pcie, val, APPL_CFG_MISC);
+
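+	/* With no CLKREQ# routing, drive the CLKREQ output via the override */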
+	if (!pcie->supports_clkreq) {
+		val = appl_readl(pcie, APPL_PINMUX);
+		val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
+		val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
+		appl_writel(pcie, val, APPL_PINMUX);
+	}
+
+	/* Update iATU_DMA base address */
+	appl_writel(pcie,
+		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+		    APPL_CFG_IATU_DMA_BASE_ADDR);
+
+	reset_control_deassert(pcie->core_rst);
+
+	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+						      PCI_CAP_ID_EXP);
+
+	/* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
+	if (!pcie->supports_clkreq) {
+		disable_aspm_l11(pcie);
+		disable_aspm_l12(pcie);
+	}
+
+	return ret;
+
+fail_phy:
+	reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+	clk_disable_unprepare(pcie->core_clk);
+fail_core_clk:
+	regulator_disable(pcie->pex_ctl_supply);
+fail_reg_en:
+	tegra_pcie_disable_slot_regulators(pcie);
+fail_slot_reg_en:
+	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+
+	return ret;
+}
+
+static int __deinit_controller(struct tegra_pcie_dw *pcie)
+{
+	int ret;
+
+	ret = reset_control_assert(pcie->core_rst);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
+			ret);
+		return ret;
+	}
+
+	tegra_pcie_disable_phy(pcie);
+
+	ret = reset_control_assert(pcie->core_apb_rst);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
+		return ret;
+	}
+
+	clk_disable_unprepare(pcie->core_clk);
+
+	ret = regulator_disable(pcie->pex_ctl_supply);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
+		return ret;
+	}
+
+	tegra_pcie_disable_slot_regulators(pcie);
+
+	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
+			pcie->cid, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	int ret;
+
+	ret = tegra_pcie_config_controller(pcie, false);
+	if (ret < 0)
+		return ret;
+
+	pp->ops = &tegra_pcie_dw_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
+		goto fail_host_init;
+	}
+
+	return 0;
+
+fail_host_init:
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+{
+	u32 val;
+
+	if (!tegra_pcie_dw_link_up(&pcie->pci))
+		return 0;
+
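+	/* Send PME_Turn_Off and poll for the link to settle in L2 */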
+	val = appl_readl(pcie, APPL_RADM_STATUS);
+	val |= APPL_PM_XMT_TURNOFF_STATE;
+	appl_writel(pcie, val, APPL_RADM_STATUS);
+
+	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
+				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
+				 1, PME_ACK_TIMEOUT);
+}
+
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+{
+	u32 data;
+	int err;
+
+	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+		dev_dbg(pcie->dev, "PCIe link is not up\n");
+		return;
+	}
+
+	if (tegra_pcie_try_link_l2(pcie)) {
+		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
+		/*
+		 * The TX lane clock frequency resets to the Gen1 rate only
+		 * when the link is in the L2 or detect state. So assert
+		 * PERST# to the endpoint to force the root port into the
+		 * detect state.
+		 */
+		data = appl_readl(pcie, APPL_PINMUX);
+		data &= ~APPL_PINMUX_PEX_RST;
+		appl_writel(pcie, data, APPL_PINMUX);
+
+		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
+						data,
+						((data &
+						APPL_DEBUG_LTSSM_STATE_MASK) >>
+						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+						LTSSM_STATE_PRE_DETECT,
+						1, LTSSM_TIMEOUT);
+		if (err) {
+			dev_info(pcie->dev, "Link didn't go to detect state\n");
+		} else {
+			/* Disable LTSSM after link is in detect state */
+			data = appl_readl(pcie, APPL_CTRL);
+			data &= ~APPL_CTRL_LTSSM_EN;
+			appl_writel(pcie, data, APPL_CTRL);
+		}
+	}
+	/*
+	 * DBI registers may not be accessible after this, as PLL-E may be
+	 * powered down depending on how CLKREQ is pulled by the endpoint
+	 */
+	data = appl_readl(pcie, APPL_PINMUX);
+	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
+	/* Cut REFCLK to slot */
+	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+	appl_writel(pcie, data, APPL_PINMUX);
+}
+
+static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+{
+	tegra_pcie_downstream_dev_to_D0(pcie);
+	dw_pcie_host_deinit(&pcie->pci.pp);
+	tegra_pcie_dw_pme_turnoff(pcie);
+
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+{
+	struct pcie_port *pp = &pcie->pci.pp;
+	struct device *dev = pcie->dev;
+	char *name;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
+		if (pp->msi_irq <= 0) {
+			dev_err(dev, "Failed to get MSI interrupt\n");
+			return pp->msi_irq ? pp->msi_irq : -ENODEV;
+		}
+	}
+
+	pm_runtime_enable(dev);
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+			ret);
+		goto fail_pm_get_sync;
+	}
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
+		goto fail_pm_get_sync;
+	}
+
+	ret = tegra_pcie_init_controller(pcie);
+	if (ret < 0) {
+		dev_err(dev, "Failed to initialize controller: %d\n", ret);
+		goto fail_pm_get_sync;
+	}
+
+	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+	if (!pcie->link_state) {
+		ret = -ENOMEDIUM;
+		goto fail_host_init;
+	}
+
+	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+	if (!name) {
+		ret = -ENOMEM;
+		goto fail_host_init;
+	}
+
+	pcie->debugfs = debugfs_create_dir(name, NULL);
+	if (IS_ERR_OR_NULL(pcie->debugfs))
+		dev_err(dev, "Failed to create debugfs\n");
+	else
+		init_debugfs(pcie);
+
+	return ret;
+
+fail_host_init:
+	tegra_pcie_deinit_controller(pcie);
+fail_pm_get_sync:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *atu_dma_res;
+	struct tegra_pcie_dw *pcie;
+	struct resource *dbi_res;
+	struct pcie_port *pp;
+	struct dw_pcie *pci;
+	struct phy **phys;
+	char *name;
+	int ret;
+	u32 i;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = &pcie->pci;
+	pci->dev = &pdev->dev;
+	pci->ops = &tegra_dw_pcie_ops;
+	pp = &pci->pp;
+	pcie->dev = &pdev->dev;
+
+	ret = tegra_pcie_dw_parse_dt(pcie);
+	if (ret < 0) {
+		dev_err(dev, "Failed to parse device tree: %d\n", ret);
+		return ret;
+	}
+
+	ret = tegra_pcie_get_slot_regulators(pcie);
+	if (ret < 0) {
+		dev_err(dev, "Failed to get slot regulators: %d\n", ret);
+		return ret;
+	}
+
+	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
+	if (IS_ERR(pcie->pex_ctl_supply)) {
+		ret = PTR_ERR(pcie->pex_ctl_supply);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get regulator: %ld\n",
+				PTR_ERR(pcie->pex_ctl_supply));
+		return ret;
+	}
+
+	pcie->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(pcie->core_clk)) {
+		dev_err(dev, "Failed to get core clock: %ld\n",
+			PTR_ERR(pcie->core_clk));
+		return PTR_ERR(pcie->core_clk);
+	}
+
+	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      "appl");
+	if (!pcie->appl_res) {
+		dev_err(dev, "Failed to find \"appl\" region\n");
+		return -ENODEV;
+	}
+
+	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
+	if (IS_ERR(pcie->appl_base))
+		return PTR_ERR(pcie->appl_base);
+
+	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
+	if (IS_ERR(pcie->core_apb_rst)) {
+		dev_err(dev, "Failed to get APB reset: %ld\n",
+			PTR_ERR(pcie->core_apb_rst));
+		return PTR_ERR(pcie->core_apb_rst);
+	}
+
+	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
+	if (!phys)
+		return -ENOMEM;
+
+	for (i = 0; i < pcie->phy_count; i++) {
+		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
+		if (!name) {
+			dev_err(dev, "Failed to create P2U string\n");
+			return -ENOMEM;
+		}
+		phys[i] = devm_phy_get(dev, name);
+		kfree(name);
+		if (IS_ERR(phys[i])) {
+			ret = PTR_ERR(phys[i]);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get PHY: %d\n", ret);
+			return ret;
+		}
+	}
+
+	pcie->phys = phys;
+
+	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	if (!dbi_res) {
+		dev_err(dev, "Failed to find \"dbi\" region\n");
+		return -ENODEV;
+	}
+	pcie->dbi_res = dbi_res;
+
+	pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	/* Tegra HW locates DBI2 at a fixed offset from DBI */
+	pci->dbi_base2 = pci->dbi_base + 0x1000;
+
+	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "atu_dma");
+	if (!atu_dma_res) {
+		dev_err(dev, "Failed to find \"atu_dma\" region\n");
+		return -ENODEV;
+	}
+	pcie->atu_dma_res = atu_dma_res;
+
+	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
+	if (IS_ERR(pci->atu_base))
+		return PTR_ERR(pci->atu_base);
+
+	pcie->core_rst = devm_reset_control_get(dev, "core");
+	if (IS_ERR(pcie->core_rst)) {
+		dev_err(dev, "Failed to get core reset: %ld\n",
+			PTR_ERR(pcie->core_rst));
+		return PTR_ERR(pcie->core_rst);
+	}
+
+	pp->irq = platform_get_irq_byname(pdev, "intr");
+	if (pp->irq < 0) {
+		dev_err(dev, "Failed to get \"intr\" interrupt\n");
+		return pp->irq;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
+			       IRQF_SHARED, "tegra-pcie-intr", pcie);
+	if (ret) {
+		dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
+		return ret;
+	}
+
+	pcie->bpmp = tegra_bpmp_get(dev);
+	if (IS_ERR(pcie->bpmp))
+		return PTR_ERR(pcie->bpmp);
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = tegra_pcie_config_rp(pcie);
+	if (ret && ret != -ENOMEDIUM)
+		goto fail;
+	else
+		return 0;
+
+fail:
+	tegra_bpmp_put(pcie->bpmp);
+	return ret;
+}
+
+static int tegra_pcie_dw_remove(struct platform_device *pdev)
+{
+	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+	if (!pcie->link_state)
+		return 0;
+
+	debugfs_remove_recursive(pcie->debugfs);
+	tegra_pcie_deinit_controller(pcie);
+	pm_runtime_put_sync(pcie->dev);
+	pm_runtime_disable(pcie->dev);
+	tegra_bpmp_put(pcie->bpmp);
+
+	return 0;
+}
+
+static int tegra_pcie_dw_suspend_late(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+	u32 val;
+
+	if (!pcie->link_state)
+		return 0;
+
+	/* Enable HW_HOT_RST mode */
+	val = appl_readl(pcie, APPL_CTRL);
+	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+	val |= APPL_CTRL_HW_HOT_RST_EN;
+	appl_writel(pcie, val, APPL_CTRL);
+
+	return 0;
+}
+
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+
+	if (!pcie->link_state)
+		return 0;
+
+	/* Save MSI interrupt vector */
+	pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
+					       PORT_LOGIC_MSI_CTRL_INT_0_EN);
+	tegra_pcie_downstream_dev_to_D0(pcie);
+	tegra_pcie_dw_pme_turnoff(pcie);
+
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	if (!pcie->link_state)
+		return 0;
+
+	ret = tegra_pcie_config_controller(pcie, true);
+	if (ret < 0)
+		return ret;
+
+	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+	if (ret < 0) {
+		dev_err(dev, "Failed to init host: %d\n", ret);
+		goto fail_host_init;
+	}
+
+	/* Restore MSI interrupt vector */
+	dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
+			   pcie->msi_ctrl_int);
+
+	return 0;
+
+fail_host_init:
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_dw_resume_early(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+	u32 val;
+
+	if (!pcie->link_state)
+		return 0;
+
+	/* Disable HW_HOT_RST mode */
+	val = appl_readl(pcie, APPL_CTRL);
+	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+	val &= ~APPL_CTRL_HW_HOT_RST_EN;
+	appl_writel(pcie, val, APPL_CTRL);
+
+	return 0;
+}
+
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+{
+	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+	if (!pcie->link_state)
+		return;
+
+	debugfs_remove_recursive(pcie->debugfs);
+	tegra_pcie_downstream_dev_to_D0(pcie);
+
+	disable_irq(pcie->pci.pp.irq);
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		disable_irq(pcie->pci.pp.msi_irq);
+
+	tegra_pcie_dw_pme_turnoff(pcie);
+	__deinit_controller(pcie);
+}
+
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
+	{
+		.compatible = "nvidia,tegra194-pcie",
+	},
+	{},
+};
+
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+	.suspend_late = tegra_pcie_dw_suspend_late,
+	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
+	.resume_noirq = tegra_pcie_dw_resume_noirq,
+	.resume_early = tegra_pcie_dw_resume_early,
+};
+
+static struct platform_driver tegra_pcie_dw_driver = {
+	.probe = tegra_pcie_dw_probe,
+	.remove = tegra_pcie_dw_remove,
+	.shutdown = tegra_pcie_dw_shutdown,
+	.driver = {
+		.name	= "tegra194-pcie",
+		.pm = &tegra_pcie_dw_pm_ops,
+		.of_match_table = tegra_pcie_dw_of_match,
+	},
+};
+module_platform_driver(tegra_pcie_dw_driver);
+
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+
+MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-uniphier.c b/marvell/linux/drivers/pci/controller/dwc/pcie-uniphier.c
new file mode 100644
index 0000000..3f30ee4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define PCL_PINCTRL0			0x002c
+#define PCL_PERST_PLDN_REGEN		BIT(12)
+#define PCL_PERST_NOE_REGEN		BIT(11)
+#define PCL_PERST_OUT_REGEN		BIT(8)
+#define PCL_PERST_PLDN_REGVAL		BIT(4)
+#define PCL_PERST_NOE_REGVAL		BIT(3)
+#define PCL_PERST_OUT_REGVAL		BIT(0)
+
+#define PCL_PIPEMON			0x0044
+#define PCL_PCLK_ALIVE			BIT(15)
+
+#define PCL_APP_READY_CTRL		0x8008
+#define PCL_APP_LTSSM_ENABLE		BIT(0)
+
+#define PCL_APP_PM0			0x8078
+#define PCL_SYS_AUX_PWR_DET		BIT(8)
+
+#define PCL_RCV_INT			0x8108
+#define PCL_RCV_INT_ALL_ENABLE		GENMASK(20, 17)
+#define PCL_CFG_BW_MGT_STATUS		BIT(4)
+#define PCL_CFG_LINK_AUTO_BW_STATUS	BIT(3)
+#define PCL_CFG_AER_RC_ERR_MSI_STATUS	BIT(2)
+#define PCL_CFG_PME_MSI_STATUS		BIT(1)
+
+#define PCL_RCV_INTX			0x810c
+#define PCL_RCV_INTX_ALL_ENABLE		GENMASK(19, 16)
+#define PCL_RCV_INTX_ALL_MASK		GENMASK(11, 8)
+#define PCL_RCV_INTX_MASK_SHIFT		8
+#define PCL_RCV_INTX_ALL_STATUS		GENMASK(3, 0)
+#define PCL_RCV_INTX_STATUS_SHIFT	0
+
+#define PCL_STATUS_LINK			0x8140
+#define PCL_RDLH_LINK_UP		BIT(1)
+#define PCL_XMLH_LINK_UP		BIT(0)
+
+struct uniphier_pcie_priv {
+	void __iomem *base;
+	struct dw_pcie pci;
+	struct clk *clk;
+	struct reset_control *rst;
+	struct phy *phy;
+	struct irq_domain *legacy_irq_domain;
+};
+
+#define to_uniphier_pcie(x)	dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+				       bool enable)
+{
+	u32 val;
+
+	val = readl(priv->base + PCL_APP_READY_CTRL);
+	if (enable)
+		val |= PCL_APP_LTSSM_ENABLE;
+	else
+		val &= ~PCL_APP_LTSSM_ENABLE;
+	writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+{
+	u32 val;
+
+	/* use auxiliary power detection */
+	val = readl(priv->base + PCL_APP_PM0);
+	val |= PCL_SYS_AUX_PWR_DET;
+	writel(val, priv->base + PCL_APP_PM0);
+
+	/* assert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+		 | PCL_PERST_PLDN_REGVAL);
+	val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+		| PCL_PERST_PLDN_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+
+	uniphier_pcie_ltssm_enable(priv, false);
+
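+	/* Keep PERST# asserted for 100-200 ms before releasing it */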
+	usleep_range(100000, 200000);
+
+	/* deassert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
+{
+	u32 status;
+	int ret;
+
+	/* wait for the PIPE clock */
+	ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+				 status & PCL_PCLK_ALIVE, 100000, 1000000);
+	if (ret) {
+		dev_err(priv->pci.dev,
+			"Failed to initialize controller in RC mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pcie_link_up(struct dw_pcie *pci)
+{
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val, mask;
+
+	val = readl(priv->base + PCL_STATUS_LINK);
+	mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
+
+	return (val & mask) == mask;
+}
+
+static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+
+	if (dw_pcie_link_up(pci))
+		return 0;
+
+	uniphier_pcie_ltssm_enable(priv, true);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+
+	uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
+{
+	writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
+	writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
+{
+	writel(0, priv->base + PCL_RCV_INT);
+	writel(0, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_ack(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val;
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	val &= ~PCL_RCV_INTX_ALL_STATUS;
+	val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
+	writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_mask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val;
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	val &= ~PCL_RCV_INTX_ALL_MASK;
+	val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+	writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_unmask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val;
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	val &= ~PCL_RCV_INTX_ALL_MASK;
+	val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+	writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static struct irq_chip uniphier_pcie_irq_chip = {
+	.name = "PCI",
+	.irq_ack = uniphier_pcie_irq_ack,
+	.irq_mask = uniphier_pcie_irq_mask,
+	.irq_unmask = uniphier_pcie_irq_unmask,
+};
+
+static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				  irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops uniphier_intx_domain_ops = {
+	.map = uniphier_pcie_intx_map,
+};
+
+static void uniphier_pcie_irq_handler(struct irq_desc *desc)
+{
+	struct pcie_port *pp = irq_desc_get_handler_data(desc);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long reg;
+	u32 val, bit, virq;
+
+	/* Status interrupts, logged for debugging only */
+	val = readl(priv->base + PCL_RCV_INT);
+
+	if (val & PCL_CFG_BW_MGT_STATUS)
+		dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
+	if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
+		dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
+	if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
+		dev_dbg(pci->dev, "Root Error\n");
+	if (val & PCL_CFG_PME_MSI_STATUS)
+		dev_dbg(pci->dev, "PME Interrupt\n");
+
+	writel(val, priv->base + PCL_RCV_INT);
+
+	/* INTx */
+	chained_irq_enter(chip, desc);
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
+
+	for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
+		virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
+		generic_handle_irq(virq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
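+/*
+ * The INTx demultiplexer below expects a "legacy-interrupt-controller"
+ * child node of the controller in DT. An illustrative fragment (property
+ * values are assumptions, not taken from a real board):
+ *
+ *	legacy-interrupt-controller {
+ *		interrupt-controller;
+ *		#interrupt-cells = <1>;
+ *		interrupts = <0 226 4>;
+ *	};
+ */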
+static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	struct device_node *np = pci->dev->of_node;
+	struct device_node *np_intc;
+	int ret = 0;
+
+	np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
+	if (!np_intc) {
+		dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
+		return -EINVAL;
+	}
+
+	pp->irq = irq_of_parse_and_map(np_intc, 0);
+	if (!pp->irq) {
+		dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+	priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+						&uniphier_intx_domain_ops, pp);
+	if (!priv->legacy_irq_domain) {
+		dev_err(pci->dev, "Failed to get INTx domain\n");
+		ret = -ENODEV;
+		goto out_put_node;
+	}
+
+	irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
+					 pp);
+
+out_put_node:
+	of_node_put(np_intc);
+	return ret;
+}
+
+static int uniphier_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	int ret;
+
+	ret = uniphier_pcie_config_legacy_irq(pp);
+	if (ret)
+		return ret;
+
+	uniphier_pcie_irq_enable(priv);
+
+	dw_pcie_setup_rc(pp);
+	ret = uniphier_pcie_establish_link(pci);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
+	.host_init = uniphier_pcie_host_init,
+};
+
+static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
+				  struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &priv->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &uniphier_pcie_host_ops;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0)
+			return pp->msi_irq;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
+{
+	int ret;
+
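+	/*
+	 * Bring-up order: clock on, reset release, RC init with PERST# pulse,
+	 * PHY init, then wait for the PIPE clock to come alive.
+	 */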
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	ret = reset_control_deassert(priv->rst);
+	if (ret)
+		goto out_clk_disable;
+
+	uniphier_pcie_init_rc(priv);
+
+	ret = phy_init(priv->phy);
+	if (ret)
+		goto out_rst_assert;
+
+	ret = uniphier_pcie_wait_rc(priv);
+	if (ret)
+		goto out_phy_exit;
+
+	return 0;
+
+out_phy_exit:
+	phy_exit(priv->phy);
+out_rst_assert:
+	reset_control_assert(priv->rst);
+out_clk_disable:
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
+{
+	uniphier_pcie_irq_disable(priv);
+	phy_exit(priv->phy);
+	reset_control_assert(priv->rst);
+	clk_disable_unprepare(priv->clk);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.start_link = uniphier_pcie_establish_link,
+	.stop_link = uniphier_pcie_stop_link,
+	.link_up = uniphier_pcie_link_up,
+};
+
+static int uniphier_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_pcie_priv *priv;
+	struct resource *res;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pci.dev = dev;
+	priv->pci.ops = &dw_pcie_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(priv->pci.dbi_base))
+		return PTR_ERR(priv->pci.dbi_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	priv->rst = devm_reset_control_get_shared(dev, NULL);
+	if (IS_ERR(priv->rst))
+		return PTR_ERR(priv->rst);
+
+	priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+	if (IS_ERR(priv->phy))
+		return PTR_ERR(priv->phy);
+
+	platform_set_drvdata(pdev, priv);
+
+	ret = uniphier_pcie_host_enable(priv);
+	if (ret)
+		return ret;
+
+	return uniphier_add_pcie_port(priv, pdev);
+}
+
+static int uniphier_pcie_remove(struct platform_device *pdev)
+{
+	struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
+
+	uniphier_pcie_host_disable(priv);
+
+	return 0;
+}
+
+static const struct of_device_id uniphier_pcie_match[] = {
+	{ .compatible = "socionext,uniphier-pcie", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
+
+static struct platform_driver uniphier_pcie_driver = {
+	.probe  = uniphier_pcie_probe,
+	.remove = uniphier_pcie_remove,
+	.driver = {
+		.name = "uniphier-pcie",
+		.of_match_table = uniphier_pcie_match,
+	},
+};
+builtin_platform_driver(uniphier_pcie_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
+MODULE_LICENSE("GPL v2");