ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/drivers/pci/controller/Kconfig b/marvell/linux/drivers/pci/controller/Kconfig
new file mode 100644
index 0000000..89f1f8d
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/Kconfig
@@ -0,0 +1,298 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PCI controller drivers"
+	depends on PCI
+
+config PCI_MVEBU
+	bool "Marvell EBU PCIe controller"
+	depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
+	depends on MVEBU_MBUS
+	depends on ARM
+	depends on OF
+	select PCI_BRIDGE_EMUL
+
+config PCI_AARDVARK
+	bool "Aardvark PCIe controller"
+	depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCI_BRIDGE_EMUL
+	help
+	 Add support for the Aardvark 64-bit PCIe Host Controller. This
+	 controller is part of the South Bridge of the Marvell Armada
+	 3700 SoC.
+
+menu "Cadence PCIe controllers support"
+
+config PCIE_CADENCE
+	bool
+
+config PCIE_CADENCE_HOST
+	bool "Cadence PCIe host controller"
+	depends on OF
+	depends on PCI
+	select IRQ_DOMAIN
+	select PCIE_CADENCE
+	help
+	  Say Y here if you want to support the Cadence PCIe controller in host
+	  mode. This PCIe controller may be embedded into many different
+	  vendors' SoCs.
+
+config PCIE_CADENCE_EP
+	bool "Cadence PCIe endpoint controller"
+	depends on OF
+	depends on PCI_ENDPOINT
+	select PCIE_CADENCE
+	help
+	  Say Y here if you want to support the Cadence PCIe controller in
+	  endpoint mode. This PCIe controller may be embedded into many
+	  different vendors' SoCs.
+
+endmenu
+
+config PCIE_XILINX_NWL
+	bool "NWL PCIe Core"
+	depends on ARCH_ZYNQMP || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	 Say 'Y' here if you want kernel support for the Xilinx
+	 NWL PCIe controller. The controller can act as a Root Port
+	 or an Endpoint. This option currently enables only Root
+	 Port support.
+
+config PCI_FTPCI100
+	bool "Faraday Technology FTPCI100 PCI controller"
+	depends on OF
+	default ARCH_GEMINI
+
+config PCI_TEGRA
+	bool "NVIDIA Tegra PCIe controller"
+	depends on ARCH_TEGRA || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here if you want support for the PCIe host controller found
+	  on NVIDIA Tegra SoCs.
+
+config PCI_RCAR_GEN2
+	bool "Renesas R-Car Gen2 Internal PCI controller"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	depends on ARM
+	help
+	  Say Y here if you want internal PCI support on R-Car Gen2 SoC.
+	  There are 3 internal PCI controllers available with a single
+	  built-in EHCI/OHCI host controller present on each one.
+
+config PCIE_RCAR
+	bool "Renesas R-Car PCIe controller"
+	depends on ARCH_RENESAS || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here if you want PCIe controller support on R-Car SoCs.
+
+config PCI_HOST_COMMON
+	bool
+	select PCI_ECAM
+
+config PCI_HOST_GENERIC
+	bool "Generic PCI host controller"
+	depends on OF
+	select PCI_HOST_COMMON
+	select IRQ_DOMAIN
+	help
+	  Say Y here if you want to support a simple generic PCI host
+	  controller, such as the one emulated by kvmtool.
+
+config PCIE_XILINX
+	bool "Xilinx AXI PCIe host bridge support"
+	depends on OF || COMPILE_TEST
+	help
+	  Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+	  Host Bridge driver.
+
+config PCI_XGENE
+	bool "X-Gene PCIe controller"
+	depends on ARM64 || COMPILE_TEST
+	depends on OF || (ACPI && PCI_QUIRKS)
+	help
+	  Say Y here if you want internal PCI support on APM X-Gene SoC.
+	  There are 5 internal PCIe ports available. Each port is GEN3 capable
+	  and has a lane width ranging from x1 to x8.
+
+config PCI_XGENE_MSI
+	bool "X-Gene v1 PCIe MSI feature"
+	depends on PCI_XGENE
+	depends on PCI_MSI_IRQ_DOMAIN
+	default y
+	help
+	  Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
+	  This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+
+config PCI_V3_SEMI
+	bool "V3 Semiconductor PCI controller"
+	depends on OF
+	depends on ARM || COMPILE_TEST
+	default ARCH_INTEGRATOR_AP
+
+config PCI_VERSATILE
+	bool "ARM Versatile PB PCI controller"
+	depends on ARCH_VERSATILE
+
+config PCIE_IPROC
+	tristate
+	help
+	  This enables the iProc PCIe core controller support for Broadcom's
+	  iProc family of SoCs. An appropriate bus interface driver needs
+	  to be enabled to select this.
+
+config PCIE_IPROC_PLATFORM
+	tristate "Broadcom iProc PCIe platform bus driver"
+	depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
+	depends on OF
+	select PCIE_IPROC
+	default ARCH_BCM_IPROC
+	help
+	  Say Y here if you want to use the Broadcom iProc PCIe controller
+	  through the generic platform bus interface.
+
+config PCIE_IPROC_BCMA
+	tristate "Broadcom iProc PCIe BCMA bus driver"
+	depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
+	select PCIE_IPROC
+	select BCMA
+	default ARCH_BCM_5301X
+	help
+	  Say Y here if you want to use the Broadcom iProc PCIe controller
+	  through the BCMA bus interface.
+
+config PCIE_IPROC_MSI
+	bool "Broadcom iProc PCIe MSI support"
+	depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
+	depends on PCI_MSI_IRQ_DOMAIN
+	default ARCH_BCM_IPROC
+	help
+	  Say Y here if you want to enable MSI support for Broadcom's iProc
+	  PCIe controller.
+
+config PCIE_ALTERA
+	tristate "Altera PCIe controller"
+	depends on ARM || NIOS2 || ARM64 || COMPILE_TEST
+	help
+	  Say Y here if you want to enable PCIe controller support on Altera
+	  FPGA.
+
+config PCIE_ALTERA_MSI
+	tristate "Altera PCIe MSI feature"
+	depends on PCIE_ALTERA
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here if you want PCIe MSI support for the Altera FPGA.
+	  This MSI driver supports Altera MSI to GIC controller IP.
+
+config PCI_HOST_THUNDER_PEM
+	bool "Cavium Thunder PCIe controller to off-chip devices"
+	depends on ARM64 || COMPILE_TEST
+	depends on OF || (ACPI && PCI_QUIRKS)
+	select PCI_HOST_COMMON
+	help
+	  Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
+
+config PCI_HOST_THUNDER_ECAM
+	bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
+	depends on ARM64 || COMPILE_TEST
+	depends on OF || (ACPI && PCI_QUIRKS)
+	select PCI_HOST_COMMON
+	help
+	  Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+
+config PCIE_ROCKCHIP
+	bool
+	depends on PCI
+
+config PCIE_ROCKCHIP_HOST
+	tristate "Rockchip PCIe host controller"
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	select MFD_SYSCON
+	select PCIE_ROCKCHIP
+	help
+	  Say Y here if you want internal PCI support on Rockchip SoC.
+	  There is 1 internal PCIe port available to support GEN2 with
+	  4 slots.
+
+config PCIE_ROCKCHIP_EP
+	bool "Rockchip PCIe endpoint controller"
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	depends on OF
+	depends on PCI_ENDPOINT
+	select MFD_SYSCON
+	select PCIE_ROCKCHIP
+	help
+	  Say Y here if you want to support Rockchip PCIe controller in
+	  endpoint mode on Rockchip SoC. There is 1 internal PCIe port
+	  available to support GEN2 with 4 slots.
+
+config PCIE_MEDIATEK
+	tristate "MediaTek PCIe controller"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  MediaTek SoCs.
+
+config PCIE_MOBIVEIL
+	bool "Mobiveil AXI PCIe controller"
+	depends on ARCH_ZYNQMP || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here if you want to enable support for the Mobiveil AXI PCIe
+	  Soft IP. It has up to 8 outbound and inbound windows
+	  for address translation and it is a PCIe Gen4 IP.
+
+config PCIE_TANGO_SMP8759
+	bool "Tango SMP8759 PCIe controller (DANGEROUS)"
+	depends on ARCH_TANGO && PCI_MSI && OF
+	depends on BROKEN
+	select PCI_HOST_COMMON
+	help
+	  Say Y here to enable PCIe controller support for Sigma Designs
+	  Tango SMP8759-based systems.
+
+	  Note: The SMP8759 controller multiplexes PCI config and MMIO
+	  accesses, and Linux doesn't provide a way to serialize them.
+	  This can lead to data corruption if drivers perform concurrent
+	  config and MMIO accesses.
+
+config VMD
+	depends on PCI_MSI && X86_64 && SRCU
+	select X86_DEV_DMA_OPS
+	tristate "Intel Volume Management Device Driver"
+	---help---
+	  Adds support for the Intel Volume Management Device (VMD). VMD is a
+	  secondary PCI host bridge that allows PCI Express root ports,
+	  and devices attached to them, to be removed from the default
+	  PCI domain and placed within the VMD domain. This provides
+	  more bus resources than are otherwise possible with a
+	  single domain. If you know your system provides one of these and
+	  has devices attached to it, say Y; if you are not sure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmd.
+
+config PCI_HYPERV_INTERFACE
+	tristate "Hyper-V PCI Interface"
+	depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+	help
+	  The Hyper-V PCI Interface is a helper driver that allows other drivers to
+	  have a common interface with the Hyper-V PCI frontend driver.
+
+config PCIE_ASR1803
+	bool "ASR1803/1828 PCIe controller"
+	depends on CPU_ASR18XX
+	help
+	  Say Y here if you want PCIe support on ASR1803 or ASR1828 SoCs in
+	  single PCIe mode.
+
+source "drivers/pci/controller/dwc/Kconfig"
+endmenu
diff --git a/marvell/linux/drivers/pci/controller/Makefile b/marvell/linux/drivers/pci/controller/Makefile
new file mode 100644
index 0000000..1eb3c35
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
+obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
+obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
+obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
+obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
+obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
+obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
+obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
+obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
+obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
+obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
+obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
+obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
+obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
+obj-$(CONFIG_PCIE_ASR1803) += pcie-falcon.o
+obj-$(CONFIG_VMD) += vmd.o
+# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
+obj-y				+= dwc/
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
+obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-xgene.o
+endif
diff --git a/marvell/linux/drivers/pci/controller/dwc/Kconfig b/marvell/linux/drivers/pci/controller/dwc/Kconfig
new file mode 100644
index 0000000..ab4025e
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/Kconfig
@@ -0,0 +1,280 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "DesignWare PCI Core Support"
+	depends on PCI
+
+config PCIE_DW
+	bool
+
+config PCIE_DW_HOST
+	bool
+	select PCIE_DW
+
+config PCIE_DW_EP
+	bool
+	depends on PCI_ENDPOINT
+	select PCIE_DW
+
+config PCI_DRA7XX
+	bool
+
+config PCI_DRA7XX_HOST
+	bool "TI DRA7xx PCIe controller Host Mode"
+	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	depends on OF && HAS_IOMEM && TI_PIPE3
+	select PCIE_DW_HOST
+	select PCI_DRA7XX
+	default y
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  host mode. There are two instances of PCIe controller in DRA7xx.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCI_DRA7XX_HOST must be selected and in order
+	  to enable device-specific features PCI_DRA7XX_EP must be selected.
+	  This uses the DesignWare core.
+
+config PCI_DRA7XX_EP
+	bool "TI DRA7xx PCIe controller Endpoint Mode"
+	depends on SOC_DRA7XX || COMPILE_TEST
+	depends on PCI_ENDPOINT
+	depends on OF && HAS_IOMEM && TI_PIPE3
+	select PCIE_DW_EP
+	select PCI_DRA7XX
+	help
+	  Enables support for the PCIe controller in the DRA7xx SoC to work in
+	  endpoint mode. There are two instances of PCIe controller in DRA7xx.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCI_DRA7XX_HOST must be selected and in order
+	  to enable device-specific features PCI_DRA7XX_EP must be selected.
+	  This uses the DesignWare core.
+
+config PCIE_DW_PLAT
+	bool
+
+config PCIE_DW_PLAT_HOST
+	bool "Platform bus based DesignWare PCIe Controller - Host mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in host mode. There are two instances of the PCIe controller
+	  in the DesignWare IP.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCIE_DW_PLAT_EP
+	bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
+	depends on PCI && PCI_MSI_IRQ_DOMAIN
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_DW_PLAT
+	help
+	  Enables support for the PCIe controller in the DesignWare IP to
+	  work in endpoint mode. There are two instances of the PCIe
+	  controller in the DesignWare IP.
+	  This controller can work either as EP or RC. In order to enable
+	  host-specific features PCIE_DW_PLAT_HOST must be selected and in
+	  order to enable device-specific features PCIE_DW_PLAT_EP must be
+	  selected.
+
+config PCI_EXYNOS
+	bool "Samsung Exynos PCIe controller"
+	depends on SOC_EXYNOS5440 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+
+config PCI_IMX6
+	bool "Freescale i.MX6/7/8 PCIe controller"
+	depends on ARCH_MXC || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+
+config PCIE_SPEAR13XX
+	bool "STMicroelectronics SPEAr PCIe controller"
+	depends on ARCH_SPEAR13XX || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
+config PCI_KEYSTONE
+	bool
+
+config PCI_KEYSTONE_HOST
+	bool "PCI Keystone Host Mode"
+	depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCI_KEYSTONE
+	default y
+	help
+	  Enables support for the PCIe controller in the Keystone SoC to
+	  work in host mode. The PCI controller on Keystone is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCI_KEYSTONE_EP
+	bool "PCI Keystone Endpoint Mode"
+	depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST)
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCI_KEYSTONE
+	help
+	  Enables support for the PCIe controller in the Keystone SoC to
+	  work in endpoint mode. The PCI controller on Keystone is based
+	  on DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCI_LAYERSCAPE
+	bool "Freescale Layerscape PCIe controller - Host mode"
+	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select MFD_SYSCON
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on Layerscape
+	  SoCs to work in Host mode.
+	  This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+	  determines which PCIe controller works in EP mode and which PCIe
+	  controller works in RC mode.
+
+config PCI_LAYERSCAPE_EP
+	bool "Freescale Layerscape PCIe controller - Endpoint mode"
+	depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	help
+	  Say Y here if you want to enable PCIe controller support on Layerscape
+	  SoCs to work in Endpoint mode.
+	  This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+	  determines which PCIe controller works in EP mode and which PCIe
+	  controller works in RC mode.
+
+config PCI_HISI
+	depends on OF && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCI_HOST_COMMON
+	help
+	  Say Y here if you want PCIe controller support on HiSilicon
+	  Hip05 and Hip06 SoCs.
+
+config PCIE_QCOM
+	bool "Qualcomm PCIe controller"
+	depends on OF && (ARCH_QCOM || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+	  PCIe controller uses the DesignWare core plus Qualcomm-specific
+	  hardware wrappers.
+
+config PCIE_ARMADA_8K
+	bool "Marvell Armada-8K PCIe controller"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+config PCIE_ARTPEC6
+	bool
+
+config PCIE_ARTPEC6_HOST
+	bool "Axis ARTPEC-6 PCIe controller Host Mode"
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PCIE_ARTPEC6
+	help
+	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+	  host mode. This uses the DesignWare core.
+
+config PCIE_ARTPEC6_EP
+	bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
+	depends on MACH_ARTPEC6 || COMPILE_TEST
+	depends on PCI_ENDPOINT
+	select PCIE_DW_EP
+	select PCIE_ARTPEC6
+	help
+	  Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+	  endpoint mode. This uses the DesignWare core.
+
+config PCIE_KIRIN
+	depends on OF && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon Kirin series SoCs PCIe controllers"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support
+	  on HiSilicon Kirin series SoCs.
+
+config PCIE_HISI_STB
+	bool "HiSilicon STB SoCs PCIe controllers"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs.
+
+config PCI_MESON
+	bool "MESON PCIe controller"
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCI controller support on Amlogic
+	  SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+	  and therefore the driver re-uses the DesignWare core functions to
+	  implement the driver.
+
+config PCIE_TEGRA194
+	tristate "NVIDIA Tegra194 (and later) PCIe controller"
+	depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	select PHY_TEGRA194_P2U
+	help
+	  Say Y here if you want support for the DesignWare core based PCIe
+	  host controller found in the NVIDIA Tegra194 SoC.
+
+config PCIE_UNIPHIER
+	bool "Socionext UniPhier PCIe controllers"
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	depends on OF && HAS_IOMEM
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want PCIe controller support on UniPhier SoCs.
+	  This driver supports LD20 and PXs3 SoCs.
+
+config PCIE_AL
+	bool "Amazon Annapurna Labs PCIe controller"
+	depends on OF && (ARM64 || COMPILE_TEST)
+	depends on PCI_MSI_IRQ_DOMAIN
+	select PCIE_DW_HOST
+	help
+	  Say Y here to enable support for Amazon's Annapurna Labs PCIe
+	  controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
+	  core plus Annapurna Labs proprietary hardware wrappers. This is
+	  required only for DT-based platforms. ACPI platforms with the
+	  Annapurna Labs PCIe controller don't need to enable this.
+
+config PCIE_ASR1901
+	bool "ASR1901 PCIe controller"
+	depends on CPU_ASR1901 || COMPILE_TEST
+	select PCIE_DW_HOST
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  ASR1901 SoCs. The PCIe controller on ASR1901 is based on
+	  DesignWare hardware and therefore the driver re-uses the
+	  DesignWare core functions to implement the driver.
+
+endmenu
diff --git a/marvell/linux/drivers/pci/controller/dwc/Makefile b/marvell/linux/drivers/pci/controller/dwc/Makefile
new file mode 100644
index 0000000..e0812f8
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
+obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
+obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_ASR1901) += pcie-kestrel.o
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+ifdef CONFIG_PCI
+obj-$(CONFIG_ARM64) += pcie-al.o
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+endif
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-dra7xx.c b/marvell/linux/drivers/pci/controller/dwc/pci-dra7xx.c
new file mode 100644
index 0000000..4234ddb
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
+#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
+#define	ERR_SYS						BIT(0)
+#define	ERR_FATAL					BIT(1)
+#define	ERR_NONFATAL					BIT(2)
+#define	ERR_COR						BIT(3)
+#define	ERR_AXI						BIT(4)
+#define	ERR_ECRC					BIT(5)
+#define	PME_TURN_OFF					BIT(8)
+#define	PME_TO_ACK					BIT(9)
+#define	PM_PME						BIT(10)
+#define	LINK_REQ_RST					BIT(11)
+#define	LINK_UP_EVT					BIT(12)
+#define	CFG_BME_EVT					BIT(13)
+#define	CFG_MSE_EVT					BIT(14)
+#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
+#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
+#define	INTA						BIT(0)
+#define	INTB						BIT(1)
+#define	INTC						BIT(2)
+#define	INTD						BIT(3)
+#define	MSI						BIT(4)
+#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
+#define	DEVICE_TYPE_EP					0x0
+#define	DEVICE_TYPE_LEG_EP				0x1
+#define	DEVICE_TYPE_RC					0x4
+
+#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
+#define	LTSSM_EN					0x1
+
+#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
+#define	LINK_UP						BIT(16)
+#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF
+
+#define EXP_CAP_ID_OFFSET				0x70
+
+#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
+#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128
+
+#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
+#define MSI_REQ_GRANT					BIT(0)
+#define MSI_VECTOR_SHIFT				7
+
+#define PCIE_1LANE_2LANE_SELECTION			BIT(13)
+#define PCIE_B1C0_MODE_SEL				BIT(2)
+#define PCIE_B0_B1_TSYNCEN				BIT(0)
+
+struct dra7xx_pcie {
+	struct dw_pcie		*pci;
+	void __iomem		*base;		/* DT ti_conf */
+	int			phy_count;	/* DT phy-names count */
+	struct phy		**phy;
+	int			link_gen;
+	struct irq_domain	*irq_domain;
+	enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+	enum dw_pcie_device_mode mode;
+	u32 b1co_mode_sel_mask;
+};
+
+#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+	return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+				      u32 value)
+{
+	writel(value, pcie->base + offset);
+}
+
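+/*
+ * The DRA7xx interconnect forwards only the low 28 bits of a CPU address
+ * to the PCIe bus, hence the DRA7XX_CPU_TO_BUS_ADDR mask below.
+ */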
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
+static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+	return !!(reg & LINK_UP);
+}
+
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	u32 reg;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg &= ~LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 reg;
+	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(dev, "link is already up\n");
+		return 0;
+	}
+
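+	/*
+	 * If the link is limited to Gen1 in DT, cap both the advertised
+	 * link capability (LNKCAP) and the target link speed (LNKCTL2)
+	 * to 2.5 GT/s before enabling the LTSSM.
+	 */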
+	if (dra7xx->link_gen == 1) {
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+			     4, &reg);
+		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCAP, 4, reg);
+		}
+
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+			     2, &reg);
+		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCTL2, 2, reg);
+		}
+	}
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg |= LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+	return 0;
+}
+
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+			   LEG_EP_INTERRUPTS | MSI);
+
+	dra7xx_pcie_writel(dra7xx,
+			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+			   MSI | LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+			   INTERRUPTS);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+			   INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+	dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
+static int dra7xx_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	dw_pcie_setup_rc(pp);
+
+	dra7xx_pcie_establish_link(pci);
+	dw_pcie_wait_for_link(pci);
+	dw_pcie_msi_init(pp);
+	dra7xx_pcie_enable_interrupts(dra7xx);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+	.host_init = dra7xx_pcie_host_init,
+};
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = dra7xx_pcie_intx_map,
+	.xlate = pci_irqd_intx_xlate,
+};
+
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
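+	/* The INTx interrupt controller is expected to be the first child node. */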
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+						   &intx_domain_ops, pp);
+	of_node_put(pcie_intc_node);
+	if (!dra7xx->irq_domain) {
+		dev_err(dev, "Failed to get a INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+{
+	struct dra7xx_pcie *dra7xx = arg;
+	struct dw_pcie *pci = dra7xx->pci;
+	struct pcie_port *pp = &pci->pp;
+	unsigned long reg;
+	u32 virq, bit;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+
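+	/*
+	 * The status register reports either an MSI or one of the legacy
+	 * INTA..INTD interrupts; legacy interrupts are demultiplexed
+	 * through the INTx IRQ domain.
+	 */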
+	switch (reg) {
+	case MSI:
+		dw_handle_msi_irq(pp);
+		break;
+	case INTA:
+	case INTB:
+	case INTC:
+	case INTD:
+		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
+			virq = irq_find_mapping(dra7xx->irq_domain, bit);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+		break;
+	}
+
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+	struct dra7xx_pcie *dra7xx = arg;
+	struct dw_pcie *pci = dra7xx->pci;
+	struct device *dev = pci->dev;
+	struct dw_pcie_ep *ep = &pci->ep;
+	u32 reg;
+
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+	if (reg & ERR_SYS)
+		dev_dbg(dev, "System Error\n");
+
+	if (reg & ERR_FATAL)
+		dev_dbg(dev, "Fatal Error\n");
+
+	if (reg & ERR_NONFATAL)
+		dev_dbg(dev, "Non Fatal Error\n");
+
+	if (reg & ERR_COR)
+		dev_dbg(dev, "Correctable Error\n");
+
+	if (reg & ERR_AXI)
+		dev_dbg(dev, "AXI tag lookup fatal Error\n");
+
+	if (reg & ERR_ECRC)
+		dev_dbg(dev, "ECRC Error\n");
+
+	if (reg & PME_TURN_OFF)
+		dev_dbg(dev,
+			"Power Management Event Turn-Off message received\n");
+
+	if (reg & PME_TO_ACK)
+		dev_dbg(dev,
+			"Power Management Turn-Off Ack message received\n");
+
+	if (reg & PM_PME)
+		dev_dbg(dev, "PM Power Management Event message received\n");
+
+	if (reg & LINK_REQ_RST)
+		dev_dbg(dev, "Link Request Reset\n");
+
+	if (reg & LINK_UP_EVT) {
+		if (dra7xx->mode == DW_PCIE_EP_TYPE)
+			dw_pcie_ep_linkup(ep);
+		dev_dbg(dev, "Link-up state change\n");
+	}
+
+	if (reg & CFG_BME_EVT)
+		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
+
+	if (reg & CFG_MSE_EVT)
+		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
+
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+	return IRQ_HANDLED;
+}
+
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+
+	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
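+	/* Pulse INTA: assert, hold for 1 ms, then deassert. */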
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+	mdelay(1);
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+				      u8 interrupt_num)
+{
+	u32 reg;
+
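+	/* Encode the zero-based MSI vector number and request transmission. */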
+	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+	reg |= MSI_REQ_GRANT;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				 enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dra7xx_pcie_raise_legacy_irq(dra7xx);
+		break;
+	case PCI_EPC_IRQ_MSI:
+		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+		break;
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static const struct pci_epc_features dra7xx_pcie_epc_features = {
+	.linkup_notifier = true,
+	.msi_capable = true,
+	.msix_capable = false,
+};
+
+static const struct pci_epc_features*
+dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
+{
+	return &dra7xx_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dra7xx_pcie_ep_init,
+	.raise_irq = dra7xx_pcie_raise_irq,
+	.get_features = dra7xx_pcie_get_features,
+};
+
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+				     struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dra7xx->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+				       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie *pci = dra7xx->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	struct resource *res;
+
+	pp->irq = platform_get_irq(pdev, 1);
+	if (pp->irq < 0) {
+		dev_err(dev, "missing IRQ resource\n");
+		return pp->irq;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       "dra7-pcie-msi",	dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		return ret;
+	}
+
+	ret = dra7xx_pcie_init_irq_domain(pp);
+	if (ret < 0)
+		return ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	pp->ops = &dra7xx_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+	.start_link = dra7xx_pcie_establish_link,
+	.stop_link = dra7xx_pcie_stop_link,
+	.link_up = dra7xx_pcie_link_up,
+};
+
+static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
+{
+	int phy_count = dra7xx->phy_count;
+
+	while (phy_count--) {
+		phy_power_off(dra7xx->phy[phy_count]);
+		phy_exit(dra7xx->phy[phy_count]);
+	}
+}
+
+static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
+{
+	int phy_count = dra7xx->phy_count;
+	int ret;
+	int i;
+
+	for (i = 0; i < phy_count; i++) {
+		ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_init(dra7xx->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(dra7xx->phy[i]);
+		if (ret < 0) {
+			phy_exit(dra7xx->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(dra7xx->phy[i]);
+		phy_exit(dra7xx->phy[i]);
+	}
+
+	return ret;
+}
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
+	.b1co_mode_sel_mask = BIT(2),
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
+	.b1co_mode_sel_mask = GENMASK(3, 2),
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
+	.b1co_mode_sel_mask = BIT(2),
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
+	.b1co_mode_sel_mask = GENMASK(3, 2),
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+	{
+		.compatible = "ti,dra7-pcie",
+		.data = &dra7xx_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra7-pcie-ep",
+		.data = &dra7xx_pcie_ep_of_data,
+	},
+	{
+		.compatible = "ti,dra746-pcie-rc",
+		.data = &dra746_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra726-pcie-rc",
+		.data = &dra726_pcie_rc_of_data,
+	},
+	{
+		.compatible = "ti,dra746-pcie-ep",
+		.data = &dra746_pcie_ep_of_data,
+	},
+	{
+		.compatible = "ti,dra726-pcie-ep",
+		.data = &dra726_pcie_ep_of_data,
+	},
+	{},
+};
+
+/*
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Accesses to the PCIe slave port that are not 32-bit aligned will result
+ * in incorrect mapping to TLP Address and Byte enable fields. Therefore,
+ * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or
+ * 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
+{
+	int ret;
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args args;
+	struct regmap *regmap;
+
+	regmap = syscon_regmap_lookup_by_phandle(np,
+						 "ti,syscon-unaligned-access");
+	if (IS_ERR(regmap)) {
+		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+		return -EINVAL;
+	}
+
+	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+					       2, 0, &args);
+	if (ret) {
+		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+		return ret;
+	}
+
+	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+				 args.args[1]);
+	if (ret)
+		dev_err(dev, "failed to enable unaligned access\n");
+
+	of_node_put(args.np);
+
+	return ret;
+}
+
+static int dra7xx_pcie_configure_two_lane(struct device *dev,
+					  u32 b1co_mode_sel_mask)
+{
+	struct device_node *np = dev->of_node;
+	struct regmap *pcie_syscon;
+	unsigned int pcie_reg;
+	u32 mask;
+	u32 val;
+
+	pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+	if (IS_ERR(pcie_syscon)) {
+		dev_err(dev, "unable to get ti,syscon-lane-sel\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
+				       &pcie_reg)) {
+		dev_err(dev, "couldn't get lane selection reg offset\n");
+		return -EINVAL;
+	}
+
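+	/*
+	 * Select two-lane (B1C0) mode; B0_B1_TSYNCEN is presumed to keep
+	 * the two lanes synchronized.
+	 */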
+	mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
+	val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
+	regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
+
+	return 0;
+}
+
+static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+{
+	u32 reg;
+	int ret;
+	int irq;
+	int i;
+	int phy_count;
+	struct phy **phy;
+	struct device_link **link;
+	void __iomem *base;
+	struct resource *res;
+	struct dw_pcie *pci;
+	struct dra7xx_pcie *dra7xx;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	char name[10];
+	struct gpio_desc *reset;
+	const struct of_device_id *match;
+	const struct dra7xx_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+	u32 b1co_mode_sel_mask;
+
+	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dra7xx_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
+	b1co_mode_sel_mask = data->b1co_mode_sel_mask;
+
+	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+	if (!dra7xx)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "missing IRQ resource: %d\n", irq);
+		return irq;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
+	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!base)
+		return -ENOMEM;
+
+	phy_count = of_property_count_strings(np, "phy-names");
+	if (phy_count < 0) {
+		dev_err(dev, "unable to find the strings\n");
+		return phy_count;
+	}
+
+	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	for (i = 0; i < phy_count; i++) {
+		snprintf(name, sizeof(name), "pcie-phy%d", i);
+		phy[i] = devm_phy_get(dev, name);
+		if (IS_ERR(phy[i]))
+			return PTR_ERR(phy[i]);
+
+		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+		if (!link[i]) {
+			ret = -EINVAL;
+			goto err_link;
+		}
+	}
+
+	dra7xx->base = base;
+	dra7xx->phy = phy;
+	dra7xx->pci = pci;
+	dra7xx->phy_count = phy_count;
+
+	if (phy_count == 2) {
+		ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
+		if (ret < 0)
+			dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
+	}
+
+	ret = dra7xx_pcie_enable_phy(dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, dra7xx);
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
+	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
+	if (IS_ERR(reset)) {
+		ret = PTR_ERR(reset);
+		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+		goto err_gpio;
+	}
+
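+	/* Keep the LTSSM disabled until the port/endpoint is configured. */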
+	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+	reg &= ~LTSSM_EN;
+	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+	dra7xx->link_gen = of_pci_get_max_link_speed(np);
+	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
+		dra7xx->link_gen = 2;
+
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
+			ret = -ENODEV;
+			goto err_gpio;
+		}
+
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_RC);
+
+		ret = dra7xx_pcie_unaligned_memaccess(dev);
+		if (ret)
+			dev_err(dev, "WA for Errata i870 not applied\n");
+
+		ret = dra7xx_add_pcie_port(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
+			ret = -ENODEV;
+			goto err_gpio;
+		}
+
+		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+				   DEVICE_TYPE_EP);
+
+		ret = dra7xx_pcie_unaligned_memaccess(dev);
+		if (ret)
+			goto err_gpio;
+
+		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+		if (ret < 0)
+			goto err_gpio;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+	dra7xx->mode = mode;
+
+	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		goto err_gpio;
+	}
+
+	return 0;
+
+err_gpio:
+	pm_runtime_put(dev);
+
+err_get_sync:
+	pm_runtime_disable(dev);
+	dra7xx_pcie_disable_phy(dra7xx);
+
+err_link:
+	while (--i >= 0)
+		device_link_del(link[i]);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	struct dw_pcie *pci = dra7xx->pci;
+	u32 val;
+
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
+	/* clear MSE */
+	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+	val &= ~PCI_COMMAND_MEMORY;
+	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+	return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	struct dw_pcie *pci = dra7xx->pci;
+	u32 val;
+
+	if (dra7xx->mode != DW_PCIE_RC_TYPE)
+		return 0;
+
+	/* set MSE */
+	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+	val |= PCI_COMMAND_MEMORY;
+	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+	return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+
+	dra7xx_pcie_disable_phy(dra7xx);
+
+	return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dra7xx_pcie_enable_phy(dra7xx);
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	int ret;
+
+	dra7xx_pcie_stop_link(dra7xx->pci);
+
+	ret = pm_runtime_put_sync(dev);
+	if (ret < 0)
+		dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+	pm_runtime_disable(dev);
+	dra7xx_pcie_disable_phy(dra7xx);
+}
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+				      dra7xx_pcie_resume_noirq)
+};
+
+static struct platform_driver dra7xx_pcie_driver = {
+	.driver = {
+		.name	= "dra7-pcie",
+		.of_match_table = of_dra7xx_pcie_match,
+		.suppress_bind_attrs = true,
+		.pm	= &dra7xx_pcie_pm_ops,
+	},
+	.shutdown = dra7xx_pcie_shutdown,
+};
+builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-exynos.c b/marvell/linux/drivers/pci/controller/dwc/pci-exynos.c
new file mode 100644
index 0000000..14a6ba4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-exynos.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x)	dev_get_drvdata((x)->dev)
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE			0x000
+#define IRQ_INTA_ASSERT			BIT(0)
+#define IRQ_INTB_ASSERT			BIT(2)
+#define IRQ_INTC_ASSERT			BIT(4)
+#define IRQ_INTD_ASSERT			BIT(6)
+#define PCIE_IRQ_LEVEL			0x004
+#define PCIE_IRQ_SPECIAL		0x008
+#define PCIE_IRQ_EN_PULSE		0x00c
+#define PCIE_IRQ_EN_LEVEL		0x010
+#define IRQ_MSI_ENABLE			BIT(2)
+#define PCIE_IRQ_EN_SPECIAL		0x014
+#define PCIE_PWR_RESET			0x018
+#define PCIE_CORE_RESET			0x01c
+#define PCIE_CORE_RESET_ENABLE		BIT(0)
+#define PCIE_STICKY_RESET		0x020
+#define PCIE_NONSTICKY_RESET		0x024
+#define PCIE_APP_INIT_RESET		0x028
+#define PCIE_APP_LTSSM_ENABLE		0x02c
+#define PCIE_ELBI_RDLH_LINKUP		0x064
+#define PCIE_ELBI_LTSSM_ENABLE		0x1
+#define PCIE_ELBI_SLV_AWMISC		0x11c
+#define PCIE_ELBI_SLV_ARMISC		0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
+
+struct exynos_pcie_mem_res {
+	void __iomem *elbi_base;   /* DT 0th resource: PCIe CTRL */
+};
+
+struct exynos_pcie_clk_res {
+	struct clk *clk;
+	struct clk *bus_clk;
+};
+
+struct exynos_pcie {
+	struct dw_pcie			*pci;
+	struct exynos_pcie_mem_res	*mem_res;
+	struct exynos_pcie_clk_res	*clk_res;
+	const struct exynos_pcie_ops	*ops;
+	int				reset_gpio;
+
+	struct phy			*phy;
+};
+
+struct exynos_pcie_ops {
+	int (*get_mem_resources)(struct platform_device *pdev,
+			struct exynos_pcie *ep);
+	int (*get_clk_resources)(struct exynos_pcie *ep);
+	int (*init_clk_resources)(struct exynos_pcie *ep);
+	void (*deinit_clk_resources)(struct exynos_pcie *ep);
+};
+
+static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
+					     struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+	struct resource *res;
+
+	ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
+	if (!ep->mem_res)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ep->mem_res->elbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ep->mem_res->elbi_base))
+		return PTR_ERR(ep->mem_res->elbi_base);
+
+	return 0;
+}
+
+static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+
+	ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
+	if (!ep->clk_res)
+		return -ENOMEM;
+
+	ep->clk_res->clk = devm_clk_get(dev, "pcie");
+	if (IS_ERR(ep->clk_res->clk)) {
+		dev_err(dev, "Failed to get pcie rc clock\n");
+		return PTR_ERR(ep->clk_res->clk);
+	}
+
+	ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
+	if (IS_ERR(ep->clk_res->bus_clk)) {
+		dev_err(dev, "Failed to get pcie bus clock\n");
+		return PTR_ERR(ep->clk_res->bus_clk);
+	}
+
+	return 0;
+}
+
+static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	ret = clk_prepare_enable(ep->clk_res->clk);
+	if (ret) {
+		dev_err(dev, "cannot enable pcie rc clock");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(ep->clk_res->bus_clk);
+	if (ret) {
+		dev_err(dev, "cannot enable pcie bus clock");
+		goto err_bus_clk;
+	}
+
+	return 0;
+
+err_bus_clk:
+	clk_disable_unprepare(ep->clk_res->clk);
+
+	return ret;
+}
+
+static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
+{
+	clk_disable_unprepare(ep->clk_res->bus_clk);
+	clk_disable_unprepare(ep->clk_res->clk);
+}
+
+static const struct exynos_pcie_ops exynos5440_pcie_ops = {
+	.get_mem_resources	= exynos5440_pcie_get_mem_resources,
+	.get_clk_resources	= exynos5440_pcie_get_clk_resources,
+	.init_clk_resources	= exynos5440_pcie_init_clk_resources,
+	.deinit_clk_resources	= exynos5440_pcie_deinit_clk_resources,
+};
+
+static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
+{
+	writel(val, base + reg);
+}
+
+static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
+{
+	return readl(base + reg);
+}
+
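+/*
+ * DBI (own configuration space) accesses share the slave interface with
+ * memory accesses; the AWMISC/ARMISC bits steer the next write/read to
+ * the DBI, so every DBI access is bracketed by these mode toggles.
+ */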
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+	if (on)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+	if (on)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+}
+
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+	val &= ~PCIE_CORE_RESET_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+	val |= PCIE_CORE_RESET_ENABLE;
+
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
+	exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
+}
+
+static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct device *dev = pci->dev;
+
+	if (ep->reset_gpio >= 0)
+		devm_gpio_request_one(dev, ep->reset_gpio,
+				GPIOF_OUT_INIT_HIGH, "RESET");
+}
+
+static int exynos_pcie_establish_link(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(dev, "Link already up\n");
+		return 0;
+	}
+
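+	/* Cold-start sequence: core reset, PHY bring-up, RC setup, LTSSM on. */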
+	exynos_pcie_assert_core_reset(ep);
+
+	phy_reset(ep->phy);
+
+	exynos_pcie_writel(ep->mem_res->elbi_base, 1,
+			PCIE_PWR_RESET);
+
+	phy_power_on(ep->phy);
+	phy_init(ep->phy);
+
+	exynos_pcie_deassert_core_reset(ep);
+	dw_pcie_setup_rc(pp);
+	exynos_pcie_assert_reset(ep);
+
+	/* assert LTSSM enable */
+	exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+			  PCIE_APP_LTSSM_ENABLE);
+
+	/* check if the link is up or not */
+	if (!dw_pcie_wait_for_link(pci))
+		return 0;
+
+	phy_power_off(ep->phy);
+	return -ETIMEDOUT;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
+}
+
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
+{
+	u32 val;
+
+	/* enable INTX interrupt */
+	val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+		IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+	struct exynos_pcie *ep = arg;
+
+	exynos_pcie_clear_irq_pulse(ep);
+	return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct exynos_pcie *ep)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	u32 val;
+
+	dw_pcie_msi_init(pp);
+
+	/* enable MSI interrupt */
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
+	val |= IRQ_MSI_ENABLE;
+	exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
+}
+
+static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
+{
+	exynos_pcie_enable_irq_pulse(ep);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		exynos_pcie_msi_init(ep);
+}
+
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+				u32 reg, size_t size)
+{
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	u32 val;
+
+	exynos_pcie_sideband_dbi_r_mode(ep, true);
+	dw_pcie_read(base + reg, size, &val);
+	exynos_pcie_sideband_dbi_r_mode(ep, false);
+	return val;
+}
+
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				  u32 reg, size_t size, u32 val)
+{
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+	exynos_pcie_sideband_dbi_w_mode(ep, true);
+	dw_pcie_write(base + reg, size, val);
+	exynos_pcie_sideband_dbi_w_mode(ep, false);
+}
+
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+				u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	int ret;
+
+	exynos_pcie_sideband_dbi_r_mode(ep, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	exynos_pcie_sideband_dbi_r_mode(ep, false);
+	return ret;
+}
+
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+				u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	int ret;
+
+	exynos_pcie_sideband_dbi_w_mode(ep, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	exynos_pcie_sideband_dbi_w_mode(ep, false);
+	return ret;
+}
+
+static int exynos_pcie_link_up(struct dw_pcie *pci)
+{
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+	u32 val;
+
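+	/* PCIE_ELBI_RDLH_LINKUP reads back 0x1 once the link is up */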
+	val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+	if (val == PCIE_ELBI_LTSSM_ENABLE)
+		return 1;
+
+	return 0;
+}
+
+static int exynos_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct exynos_pcie *ep = to_exynos_pcie(pci);
+
+	exynos_pcie_establish_link(ep);
+	exynos_pcie_enable_interrupts(ep);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
+	.rd_own_conf = exynos_pcie_rd_own_conf,
+	.wr_own_conf = exynos_pcie_wr_own_conf,
+	.host_init = exynos_pcie_host_init,
+};
+
+static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
+				       struct platform_device *pdev)
+{
+	struct dw_pcie *pci = ep->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 1);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return pp->irq;
+	}
+	ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+				IRQF_SHARED, "exynos-pcie", ep);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		return ret;
+	}
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "failed to get msi irq\n");
+			return pp->msi_irq;
+		}
+	}
+
+	pp->ops = &exynos_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.read_dbi = exynos_pcie_read_dbi,
+	.write_dbi = exynos_pcie_write_dbi,
+	.link_up = exynos_pcie_link_up,
+};
+
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct exynos_pcie *ep;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	ep->pci = pci;
+	ep->ops = (const struct exynos_pcie_ops *)
+		of_device_get_match_data(dev);
+
+	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+	ep->phy = devm_of_phy_get(dev, np, NULL);
+	if (IS_ERR(ep->phy)) {
+		if (PTR_ERR(ep->phy) != -ENODEV)
+			return PTR_ERR(ep->phy);
+
+		ep->phy = NULL;
+	}
+
+	if (ep->ops && ep->ops->get_mem_resources) {
+		ret = ep->ops->get_mem_resources(pdev, ep);
+		if (ret)
+			return ret;
+	}
+
+	if (ep->ops && ep->ops->get_clk_resources &&
+			ep->ops->init_clk_resources) {
+		ret = ep->ops->get_clk_resources(ep);
+		if (ret)
+			return ret;
+		ret = ep->ops->init_clk_resources(ep);
+		if (ret)
+			return ret;
+	}
+
+	platform_set_drvdata(pdev, ep);
+
+	ret = exynos_add_pcie_port(ep, pdev);
+	if (ret < 0)
+		goto fail_probe;
+
+	return 0;
+
+fail_probe:
+	phy_exit(ep->phy);
+
+	if (ep->ops && ep->ops->deinit_clk_resources)
+		ep->ops->deinit_clk_resources(ep);
+	return ret;
+}
+
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+	struct exynos_pcie *ep = platform_get_drvdata(pdev);
+
+	if (ep->ops && ep->ops->deinit_clk_resources)
+		ep->ops->deinit_clk_resources(ep);
+
+	return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+	{
+		.compatible = "samsung,exynos5440-pcie",
+		.data = &exynos5440_pcie_ops
+	},
+	{},
+};
+
+static struct platform_driver exynos_pcie_driver = {
+	.remove		= __exit_p(exynos_pcie_remove),
+	.driver = {
+		.name	= "exynos-pcie",
+		.of_match_table = exynos_pcie_of_match,
+	},
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+static int __init exynos_pcie_init(void)
+{
+	return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(exynos_pcie_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-imx6.c b/marvell/linux/drivers/pci/controller/dwc/pci-imx6.c
new file mode 100644
index 0000000..30c259f
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-imx6.c
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ *		http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-designware.h"
+
+#define IMX8MQ_GPR_PCIE_REF_USE_PAD		BIT(9)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	BIT(10)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	BIT(11)
+#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE	GENMASK(11, 8)
+#define IMX8MQ_PCIE2_BASE_ADDR			0x33c00000
+
+#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)
+
+enum imx6_pcie_variants {
+	IMX6Q,
+	IMX6SX,
+	IMX6QP,
+	IMX7D,
+	IMX8MQ,
+};
+
+#define IMX6_PCIE_FLAG_IMX6_PHY			BIT(0)
+#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE	BIT(1)
+#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND		BIT(2)
+
+struct imx6_pcie_drvdata {
+	enum imx6_pcie_variants variant;
+	u32 flags;
+	int dbi_length;
+};
+
+struct imx6_pcie {
+	struct dw_pcie		*pci;
+	int			reset_gpio;
+	bool			gpio_active_high;
+	struct clk		*pcie_bus;
+	struct clk		*pcie_phy;
+	struct clk		*pcie_inbound_axi;
+	struct clk		*pcie;
+	struct clk		*pcie_aux;
+	struct regmap		*iomuxc_gpr;
+	u32			controller_id;
+	struct reset_control	*pciephy_reset;
+	struct reset_control	*apps_reset;
+	struct reset_control	*turnoff_reset;
+	u32			tx_deemph_gen1;
+	u32			tx_deemph_gen2_3p5db;
+	u32			tx_deemph_gen2_6db;
+	u32			tx_swing_full;
+	u32			tx_swing_low;
+	int			link_gen;
+	struct regulator	*vpcie;
+	void __iomem		*phy_base;
+
+	/* power domain for pcie */
+	struct device		*pd_pcie;
+	/* power domain for pcie phy */
+	struct device		*pd_pcie_phy;
+	const struct imx6_pcie_drvdata *drvdata;
+};
+
+/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */

+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200
+#define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
+
+/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_IMX6_MSI_CAP			0x50
+#define PCIE_RC_LCR				0x7c
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf
+
+#define PCIE_RC_LCSR				0x80
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA(x)		FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR		BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT		BIT(17)
+#define PCIE_PHY_CTRL_WR		BIT(18)
+#define PCIE_PHY_CTRL_RD		BIT(19)
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK		BIT(16)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD			0x10
+#define  PCIE_PHY_ATEOVRD_EN			BIT(2)
+#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT	0
+#define  PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK	0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO		0x11
+#define  PCIE_PHY_MPLL_MULTIPLIER_SHIFT		2
+#define  PCIE_PHY_MPLL_MULTIPLIER_MASK		0x7f
+#define  PCIE_PHY_MPLL_MULTIPLIER_OVRD		BIT(9)
+
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)
+
+/* iMX7 PCIe PHY registers */
+#define PCIE_PHY_CMN_REG4		0x14
+/* These are probably the bits that *aren't* DCC_FB_EN */
+#define PCIE_PHY_CMN_REG4_DCC_FB_EN	0x29
+
+#define PCIE_PHY_CMN_REG15	        0x54
+#define PCIE_PHY_CMN_REG15_DLY_4	BIT(2)
+#define PCIE_PHY_CMN_REG15_PLL_PD	BIT(5)
+#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD	BIT(7)
+
+#define PCIE_PHY_CMN_REG24		0x90
+#define PCIE_PHY_CMN_REG24_RX_EQ	BIT(6)
+#define PCIE_PHY_CMN_REG24_RX_EQ_SEL	BIT(3)
+
+#define PCIE_PHY_CMN_REG26		0x98
+#define PCIE_PHY_CMN_REG26_ATT_MODE	0xBC
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN		BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN		BIT(3)
+
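+/* Poll the PHY control-bus ack bit until it matches @exp_val (~10us max) */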
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	bool val;
+	u32 max_iterations = 10;
+	u32 wait_counter = 0;
+
+	do {
+		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+			PCIE_PHY_STAT_ACK;
+		wait_counter++;
+
+		if (val == exp_val)
+			return 0;
+
+		udelay(1);
+	} while (wait_counter < max_iterations);
+
+	return -ETIMEDOUT;
+}
+
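+/*
+ * Latch @addr into the PHY control bus: drive it on the data lines, pulse
+ * CAP_ADR to capture it, then complete the ack handshake.
+ */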
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	u32 val;
+	int ret;
+
+	val = PCIE_PHY_CTRL_DATA(addr);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+	val |= PCIE_PHY_CTRL_CAP_ADR;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	val = PCIE_PHY_CTRL_DATA(addr);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
+
+	return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	u32 phy_ctl;
+	int ret;
+
+	ret = pcie_phy_wait_ack(imx6_pcie, addr);
+	if (ret)
+		return ret;
+
+	/* assert Read signal */
+	phy_ctl = PCIE_PHY_CTRL_RD;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
+
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	*data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
+
+	/* deassert Read signal */
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
+
+	return pcie_phy_poll_ack(imx6_pcie, false);
+}
+
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	u32 var;
+	int ret;
+
+	/* write addr */
+	/* cap addr */
+	ret = pcie_phy_wait_ack(imx6_pcie, addr);
+	if (ret)
+		return ret;
+
+	var = PCIE_PHY_CTRL_DATA(data);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* capture data */
+	var |= PCIE_PHY_CTRL_CAP_DAT;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	/* deassert cap data */
+	var = PCIE_PHY_CTRL_DATA(data);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* wait for ack de-assertion */
+	ret = pcie_phy_poll_ack(imx6_pcie, false);
+	if (ret)
+		return ret;
+
+	/* assert wr signal */
+	var = PCIE_PHY_CTRL_WR;
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* wait for ack */
+	ret = pcie_phy_poll_ack(imx6_pcie, true);
+	if (ret)
+		return ret;
+
+	/* deassert wr signal */
+	var = PCIE_PHY_CTRL_DATA(data);
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
+
+	/* wait for ack de-assertion */
+	ret = pcie_phy_poll_ack(imx6_pcie, false);
+	if (ret)
+		return ret;
+
+	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);
+
+	return 0;
+}
+
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+{
+	u16 tmp;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+		return;
+
+	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+
+	usleep_range(2000, 3000);
+
+	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+		  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+}
+
+#ifdef CONFIG_ARM
+/*  Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+		unsigned int fsr, struct pt_regs *regs)
+{
+	unsigned long pc = instruction_pointer(regs);
+	unsigned long instr = *(unsigned long *)pc;
+	int reg = (instr >> 12) & 15;
+
+	/*
+	 * If the instruction being executed was a read (an ARM single
+	 * data transfer with the Load bit set), make it look like it
+	 * read all-ones.
+	 */
+	if ((instr & 0x0c100000) == 0x04100000) {
+		unsigned long val;
+
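+		/* B bit: a byte load reads back 0xff, a word load all-ones */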
+		if (instr & 0x00400000)
+			val = 255;
+		else
+			val = -1;
+
+		regs->uregs[reg] = val;
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
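+	/* Halfword/signed loads (LDRH/LDRSH/LDRSB) also read as all-ones */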
+	if ((instr & 0x0e100090) == 0x00100090) {
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
+	return 1;
+}
+#endif
+
+static int imx6_pcie_attach_pd(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct device_link *link;
+
+	/* Do nothing when in a single power domain */
+	if (dev->pm_domain)
+		return 0;
+
+	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+	if (IS_ERR(imx6_pcie->pd_pcie))
+		return PTR_ERR(imx6_pcie->pd_pcie);
+	/* Do nothing when the power domain is missing */
+	if (!imx6_pcie->pd_pcie)
+		return 0;
+	link = device_link_add(dev, imx6_pcie->pd_pcie,
+			DL_FLAG_STATELESS |
+			DL_FLAG_PM_RUNTIME |
+			DL_FLAG_RPM_ACTIVE);
+	if (!link) {
+		dev_err(dev, "Failed to add device_link to pcie pd.\n");
+		return -EINVAL;
+	}
+
+	imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+	if (IS_ERR(imx6_pcie->pd_pcie_phy))
+		return PTR_ERR(imx6_pcie->pd_pcie_phy);
+
+	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+			DL_FLAG_STATELESS |
+			DL_FLAG_PM_RUNTIME |
+			DL_FLAG_RPM_ACTIVE);
+	if (!link) {
+		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+	struct device *dev = imx6_pcie->pci->dev;
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX7D:
+	case IMX8MQ:
+		reset_control_assert(imx6_pcie->pciephy_reset);
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+		/* Force PCIe PHY reset */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST,
+				   IMX6Q_GPR1_PCIE_SW_RST);
+		break;
+	case IMX6Q:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+		break;
+	}
+
+	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+		int ret = regulator_disable(imx6_pcie->vpcie);
+
+		if (ret)
+			dev_err(dev, "failed to disable vpcie regulator: %d\n",
+				ret);
+	}
+
+	/* Some boards don't have PCIe reset GPIO. */
+	if (gpio_is_valid(imx6_pcie->reset_gpio))
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					imx6_pcie->gpio_active_high);
+}
+
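+/*
+ * On i.MX8MQ the per-controller PCIe bits live in IOMUXC GPR14 (controller 0)
+ * or GPR16 (controller 1), selected by controller_id.
+ */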
+static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+{
+	WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
+	return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	unsigned int offset;
+	int ret = 0;
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+		if (ret) {
+			dev_err(dev, "unable to enable pcie_axi clock\n");
+			break;
+		}
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+		break;
+	case IMX6QP:		/* FALLTHROUGH */
+	case IMX6Q:
+		/* power up core phy and enable ref clock */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+		/*
+		 * The async reset input needs the ref clock to sync
+		 * internally. If the ref clock only comes up after the
+		 * reset, the internally synced reset time is too short
+		 * to meet the requirement, so add a ~10us delay here.
+		 */
+		usleep_range(10, 100);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+		break;
+	case IMX7D:
+		break;
+	case IMX8MQ:
+		ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+		if (ret) {
+			dev_err(dev, "unable to enable pcie_aux clock\n");
+			break;
+		}
+
+		offset = imx6_pcie_grp_offset(imx6_pcie);
+		/*
+		 * Drive the CLK_REQ override low and enable it to make
+		 * sure that REF_CLK is turned on.
+		 */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+				   0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+				   IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
+		break;
+	}
+
+	return ret;
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+	u32 val;
+	struct device *dev = imx6_pcie->pci->dev;
+
+	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+				     IOMUXC_GPR22, val,
+				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+				     PHY_PLL_LOCK_WAIT_TIMEOUT))
+		dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+		ret = regulator_enable(imx6_pcie->vpcie);
+		if (ret) {
+			dev_err(dev, "failed to enable vpcie regulator: %d\n",
+				ret);
+			return;
+		}
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie_phy clock\n");
+		goto err_pcie_phy;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie_bus clock\n");
+		goto err_pcie_bus;
+	}
+
+	ret = clk_prepare_enable(imx6_pcie->pcie);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie clock\n");
+		goto err_pcie;
+	}
+
+	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+	if (ret) {
+		dev_err(dev, "unable to enable pcie ref clock\n");
+		goto err_ref_clk;
+	}
+
+	/* allow the clocks to stabilize */
+	usleep_range(200, 500);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ:
+		reset_control_deassert(imx6_pcie->pciephy_reset);
+		break;
+	case IMX7D:
+		reset_control_deassert(imx6_pcie->pciephy_reset);
+
+		/*
+		 * Workaround for ERR010728: the PCIe PLL VCO may fail to
+		 * oscillate, especially when cold.  This turns off the
+		 * "Duty-cycle Corrector" and other undocumented features.
+		 */
+		if (likely(imx6_pcie->phy_base)) {
+			/* De-assert DCC_FB_EN */
+			writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
+			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
+			/* Assert RX_EQS and RX_EQS_SEL */
+			writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
+				| PCIE_PHY_CMN_REG24_RX_EQ,
+			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
+			/* Assert ATT_MODE */
+			writel(PCIE_PHY_CMN_REG26_ATT_MODE,
+			       imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
+		} else {
+			dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
+		}
+
+		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+		usleep_range(200, 500);
+		break;
+	case IMX6Q:		/* Nothing to do */
+		break;
+	}
+
+	/* Some boards don't have PCIe reset GPIO. */
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		msleep(100);
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					!imx6_pcie->gpio_active_high);
+		/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+		msleep(100);
+	}
+
+	return;
+
+err_ref_clk:
+	clk_disable_unprepare(imx6_pcie->pcie);
+err_pcie:
+	clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+	clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+		ret = regulator_disable(imx6_pcie->vpcie);
+		if (ret)
+			dev_err(dev, "failed to disable vpcie regulator: %d\n",
+				ret);
+	}
+}
+
+static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+{
+	unsigned int mask, val;
+
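+	/* Program the Root Port device type into the appropriate GPR12 field */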
+	if (imx6_pcie->drvdata->variant == IMX8MQ &&
+	    imx6_pcie->controller_id == 1) {
+		mask   = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+		val    = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+				    PCI_EXP_TYPE_ROOT_PORT);
+	} else {
+		mask = IMX6Q_GPR12_DEVICE_TYPE;
+		val  = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
+				  PCI_EXP_TYPE_ROOT_PORT);
+	}
+
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+}
+
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+{
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX8MQ:
+		/*
+		 * TODO: Currently this code assumes external
+		 * oscillator is being used
+		 */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr,
+				   imx6_pcie_grp_offset(imx6_pcie),
+				   IMX8MQ_GPR_PCIE_REF_USE_PAD,
+				   IMX8MQ_GPR_PCIE_REF_USE_PAD);
+		break;
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+		break;
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+				   IMX6SX_GPR12_PCIE_RX_EQ_2);
+		/* FALLTHROUGH */
+	default:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+		/* configure constant input signal to the pcie ctrl and phy */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
+				   imx6_pcie->tx_deemph_gen1 << 0);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+				   imx6_pcie->tx_deemph_gen2_6db << 12);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_FULL,
+				   imx6_pcie->tx_swing_full << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+				   IMX6Q_GPR8_TX_SWING_LOW,
+				   imx6_pcie->tx_swing_low << 25);
+		break;
+	}
+
+	imx6_pcie_configure_type(imx6_pcie);
+}
+
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+	unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+	int mult, div;
+	u16 val;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+		return 0;
+
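+	/*
+	 * Pick a multiplier/divider pair for the non-default reference
+	 * rates; 100 MHz * 25 and 200 MHz / 2 * 25 both give the same
+	 * 2.5 GHz Gen1 rate as the 125 MHz default.
+	 */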
+	switch (phy_rate) {
+	case 125000000:
+		/*
+		 * The default settings of the MPLL are for a 125MHz input
+		 * clock, so no need to reconfigure anything in that case.
+		 */
+		return 0;
+	case 100000000:
+		mult = 25;
+		div = 0;
+		break;
+	case 200000000:
+		mult = 25;
+		div = 1;
+		break;
+	default:
+		dev_err(imx6_pcie->pci->dev,
+			"Unsupported PHY reference clock rate %lu\n", phy_rate);
+		return -EINVAL;
+	}
+
+	pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+	val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+		 PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+	val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+	val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+	pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+	pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+	val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+		 PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+	val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+	val |= PCIE_PHY_ATEOVRD_EN;
+	pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+	return 0;
+}
+
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 tmp;
+	unsigned int retries;
+
+	for (retries = 0; retries < 200; retries++) {
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		/* Test if the speed change finished. */
+		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+			return 0;
+		usleep_range(100, 1000);
+	}
+
+	dev_err(dev, "Speed change timeout\n");
+	return -ETIMEDOUT;
+}
+
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6Q:
+	case IMX6SX:
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2,
+				   IMX6Q_GPR12_PCIE_CTL_2);
+		break;
+	case IMX7D:
+	case IMX8MQ:
+		reset_control_deassert(imx6_pcie->apps_reset);
+		break;
+	}
+}
+
+static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 tmp;
+	int ret;
+
+	/*
+	 * Force Gen1 operation when starting the link.  In case the link is
+	 * started in Gen2 mode, there is a possibility the devices on the
+	 * bus will not be detected at all.  This happens with PCIe switches.
+	 */
+	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
+	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
+	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+
+	/* Start LTSSM. */
+	imx6_pcie_ltssm_enable(dev);
+
+	ret = dw_pcie_wait_for_link(pci);
+	if (ret)
+		goto err_reset_phy;
+
+	if (imx6_pcie->link_gen == 2) {
+		/* Allow Gen2 mode after the link is up. */
+		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
+		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+
+		/*
+		 * Start Directed Speed Change so the best possible
+		 * speed both link partners support can be negotiated.
+		 */
+		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+		tmp |= PORT_LOGIC_SPEED_CHANGE;
+		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+		if (imx6_pcie->drvdata->flags &
+		    IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+			/*
+			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+			 * from the i.MX6 family when no link speed transition
+			 * occurs (we go Gen1 -> Gen1): the bit is not cleared
+			 * by hardware, which would make the check below
+			 * report a false failure.
+			 */
+
+			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+			if (ret) {
+				dev_err(dev, "Failed to bring link up!\n");
+				goto err_reset_phy;
+			}
+		}
+
+		/* Make sure link training is finished as well! */
+		ret = dw_pcie_wait_for_link(pci);
+		if (ret) {
+			dev_err(dev, "Failed to bring link up!\n");
+			goto err_reset_phy;
+		}
+	} else {
+		dev_info(dev, "Link: Gen2 disabled\n");
+	}
+
+	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
+	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+	return 0;
+
+err_reset_phy:
+	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+		dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
+	imx6_pcie_reset_phy(imx6_pcie);
+	return ret;
+}
+
+static int imx6_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+	imx6_pcie_assert_core_reset(imx6_pcie);
+	imx6_pcie_init_phy(imx6_pcie);
+	imx6_pcie_deassert_core_reset(imx6_pcie);
+	imx6_setup_phy_mpll(imx6_pcie);
+	dw_pcie_setup_rc(pp);
+	imx6_pcie_establish_link(imx6_pcie);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
+	.host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+			      struct platform_device *pdev)
+{
+	struct dw_pcie *pci = imx6_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq <= 0) {
+			dev_err(dev, "failed to get MSI irq\n");
+			return -ENODEV;
+		}
+	}
+
+	pp->ops = &imx6_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	/* No special ops needed, but pcie-designware still expects this struct */
+};
+
+#ifdef CONFIG_PM_SLEEP
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6Q_GPR12_PCIE_CTL_2, 0);
+		break;
+	case IMX7D:
+		reset_control_assert(imx6_pcie->apps_reset);
+		break;
+	default:
+		dev_err(dev, "ltssm_disable not supported\n");
+	}
+}
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+	struct device *dev = imx6_pcie->pci->dev;
+
+	/* Some variants have a turnoff reset in DT */
+	if (imx6_pcie->turnoff_reset) {
+		reset_control_assert(imx6_pcie->turnoff_reset);
+		reset_control_deassert(imx6_pcie->turnoff_reset);
+		goto pm_turnoff_sleep;
+	}
+
+	/* Others poke directly at IOMUXC registers */
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
+		break;
+	default:
+		dev_err(dev, "PME_Turn_Off not implemented\n");
+		return;
+	}
+
+	/*
+	 * Components with an upstream port must respond to
+	 * PME_Turn_Off with PME_TO_Ack but we can't check.
+	 *
+	 * The standard recommends a 1-10ms timeout after which to
+	 * proceed anyway as if acks were received.
+	 */
+pm_turnoff_sleep:
+	usleep_range(1000, 10000);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+	clk_disable_unprepare(imx6_pcie->pcie);
+	clk_disable_unprepare(imx6_pcie->pcie_phy);
+	clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
+		break;
+	case IMX7D:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+		break;
+	case IMX8MQ:
+		clk_disable_unprepare(imx6_pcie->pcie_aux);
+		break;
+	default:
+		break;
+	}
+}
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+		return 0;
+
+	imx6_pcie_pm_turnoff(imx6_pcie);
+	imx6_pcie_clk_disable(imx6_pcie);
+	imx6_pcie_ltssm_disable(dev);
+
+	return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+	int ret;
+	struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+	struct pcie_port *pp = &imx6_pcie->pci->pp;
+
+	if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+		return 0;
+
+	imx6_pcie_assert_core_reset(imx6_pcie);
+	imx6_pcie_init_phy(imx6_pcie);
+	imx6_pcie_deassert_core_reset(imx6_pcie);
+	dw_pcie_setup_rc(pp);
+
+	ret = imx6_pcie_establish_link(imx6_pcie);
+	if (ret < 0)
+		dev_info(dev, "pcie link is down after resume.\n");
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+				      imx6_pcie_resume_noirq)
+};
+
+static int imx6_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct imx6_pcie *imx6_pcie;
+	struct device_node *np;
+	struct resource *dbi_base;
+	struct device_node *node = dev->of_node;
+	int ret;
+	u16 val;
+
+	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
+	if (!imx6_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	imx6_pcie->pci = pci;
+	imx6_pcie->drvdata = of_device_get_match_data(dev);
+
+	/* Find the PHY if one is defined, only imx7d uses it */
+	np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+	if (np) {
+		struct resource res;
+
+		ret = of_address_to_resource(np, 0, &res);
+		if (ret) {
+			dev_err(dev, "Unable to map PCIe PHY\n");
+			return ret;
+		}
+		imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
+		if (IS_ERR(imx6_pcie->phy_base)) {
+			dev_err(dev, "Unable to map PCIe PHY\n");
+			return PTR_ERR(imx6_pcie->phy_base);
+		}
+	}
+
+	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	/* Fetch GPIOs */
+	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+	imx6_pcie->gpio_active_high = of_property_read_bool(node,
+						"reset-gpio-active-high");
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
+				imx6_pcie->gpio_active_high ?
+					GPIOF_OUT_INIT_HIGH :
+					GPIOF_OUT_INIT_LOW,
+				"PCIe reset");
+		if (ret) {
+			dev_err(dev, "unable to get reset gpio\n");
+			return ret;
+		}
+	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+		return imx6_pcie->reset_gpio;
+	}
+
+	/* Fetch clocks */
+	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
+	if (IS_ERR(imx6_pcie->pcie_phy)) {
+		dev_err(dev, "pcie_phy clock source missing or invalid\n");
+		return PTR_ERR(imx6_pcie->pcie_phy);
+	}
+
+	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
+	if (IS_ERR(imx6_pcie->pcie_bus)) {
+		dev_err(dev, "pcie_bus clock source missing or invalid\n");
+		return PTR_ERR(imx6_pcie->pcie_bus);
+	}
+
+	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
+	if (IS_ERR(imx6_pcie->pcie)) {
+		dev_err(dev, "pcie clock source missing or invalid\n");
+		return PTR_ERR(imx6_pcie->pcie);
+	}
+
+	switch (imx6_pcie->drvdata->variant) {
+	case IMX6SX:
+		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
+							   "pcie_inbound_axi");
+		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
+			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+		}
+		break;
+	case IMX8MQ:
+		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+		if (IS_ERR(imx6_pcie->pcie_aux)) {
+			dev_err(dev, "pcie_aux clock source missing or invalid\n");
+			return PTR_ERR(imx6_pcie->pcie_aux);
+		}
+		/* fall through */
+	case IMX7D:
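+		/* The second i.MX8MQ controller is identified by its DBI address */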
+		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
+			imx6_pcie->controller_id = 1;
+
+		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
+									    "pciephy");
+		if (IS_ERR(imx6_pcie->pciephy_reset)) {
+			dev_err(dev, "Failed to get PCIEPHY reset control\n");
+			return PTR_ERR(imx6_pcie->pciephy_reset);
+		}
+
+		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+									 "apps");
+		if (IS_ERR(imx6_pcie->apps_reset)) {
+			dev_err(dev, "Failed to get PCIE APPS reset control\n");
+			return PTR_ERR(imx6_pcie->apps_reset);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Grab turnoff reset */
+	imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+	if (IS_ERR(imx6_pcie->turnoff_reset)) {
+		dev_err(dev, "Failed to get TURNOFF reset control\n");
+		return PTR_ERR(imx6_pcie->turnoff_reset);
+	}
+
+	/* Grab GPR config register range */
+	imx6_pcie->iomuxc_gpr =
+		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+		dev_err(dev, "unable to find iomuxc registers\n");
+		return PTR_ERR(imx6_pcie->iomuxc_gpr);
+	}
+
+	/* Grab PCIe PHY Tx Settings */
+	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
+				 &imx6_pcie->tx_deemph_gen1))
+		imx6_pcie->tx_deemph_gen1 = 0;
+
+	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
+				 &imx6_pcie->tx_deemph_gen2_3p5db))
+		imx6_pcie->tx_deemph_gen2_3p5db = 0;
+
+	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
+				 &imx6_pcie->tx_deemph_gen2_6db))
+		imx6_pcie->tx_deemph_gen2_6db = 20;
+
+	if (of_property_read_u32(node, "fsl,tx-swing-full",
+				 &imx6_pcie->tx_swing_full))
+		imx6_pcie->tx_swing_full = 127;
+
+	if (of_property_read_u32(node, "fsl,tx-swing-low",
+				 &imx6_pcie->tx_swing_low))
+		imx6_pcie->tx_swing_low = 127;
+
+	/* Limit link speed */
+	ret = of_property_read_u32(node, "fsl,max-link-speed",
+				   &imx6_pcie->link_gen);
+	if (ret)
+		imx6_pcie->link_gen = 1;
+
+	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+	if (IS_ERR(imx6_pcie->vpcie)) {
+		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+			return PTR_ERR(imx6_pcie->vpcie);
+		imx6_pcie->vpcie = NULL;
+	}
+
+	platform_set_drvdata(pdev, imx6_pcie);
+
+	ret = imx6_pcie_attach_pd(dev);
+	if (ret)
+		return ret;
+
+	ret = imx6_add_pcie_port(imx6_pcie, pdev);
+	if (ret < 0)
+		return ret;
+
+	if (pci_msi_enabled()) {
+		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+					PCI_MSI_FLAGS);
+		val |= PCI_MSI_FLAGS_ENABLE;
+		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+				   val);
+	}
+
+	return 0;
+}
+
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+	/* bring down link, so bootloader gets clean state in case of reboot */
+	imx6_pcie_assert_core_reset(imx6_pcie);
+}
+
+static const struct imx6_pcie_drvdata drvdata[] = {
+	[IMX6Q] = {
+		.variant = IMX6Q,
+		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+		.dbi_length = 0x200,
+	},
+	[IMX6SX] = {
+		.variant = IMX6SX,
+		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
+			 IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+	},
+	[IMX6QP] = {
+		.variant = IMX6QP,
+		.flags = IMX6_PCIE_FLAG_IMX6_PHY |
+			 IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+	},
+	[IMX7D] = {
+		.variant = IMX7D,
+		.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+	},
+	[IMX8MQ] = {
+		.variant = IMX8MQ,
+	},
+};
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+	{ .compatible = "fsl,imx6q-pcie",  .data = &drvdata[IMX6Q],  },
+	{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
+	{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
+	{ .compatible = "fsl,imx7d-pcie",  .data = &drvdata[IMX7D],  },
+	{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+	{},
+};
+
+static struct platform_driver imx6_pcie_driver = {
+	.driver = {
+		.name	= "imx6q-pcie",
+		.of_match_table = imx6_pcie_of_match,
+		.suppress_bind_attrs = true,
+		.pm = &imx6_pcie_pm_ops,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.probe    = imx6_pcie_probe,
+	.shutdown = imx6_pcie_shutdown,
+};
+
+static void imx6_pcie_quirk(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct pcie_port *pp = bus->sysdata;
+
+	/* Bus parent is the PCI bridge, its parent is this platform driver */
+	if (!bus->dev.parent || !bus->dev.parent->parent)
+		return;
+
+	/* Make sure we only quirk devices associated with this driver */
+	if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+		return;
+
+	if (bus->number == pp->root_bus_nr) {
+		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+		struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+
+		/*
+		 * Limit config length to avoid the kernel reading beyond
+		 * the register set and causing an abort on i.MX 6Quad
+		 */
+		if (imx6_pcie->drvdata->dbi_length) {
+			dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+			dev_info(&dev->dev, "Limiting cfg_size to %d\n",
+					dev->cfg_size);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+			PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+
+static int __init imx6_pcie_init(void)
+{
+#ifdef CONFIG_ARM
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, imx6_pcie_of_match);
+	if (!np)
+		return -ENODEV;
+	of_node_put(np);
+
+	/*
+	 * Since probe() can be deferred we need to make sure that
+	 * hook_fault_code is not called after __init memory is freed
+	 * by the kernel and, since imx6q_pcie_abort_handler() touches
+	 * no driver state, we can install the handler here without
+	 * risking it accessing some uninitialized driver state.
+	 */
+	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
+			"external abort on non-linefetch");
+#endif
+
+	return platform_driver_register(&imx6_pcie_driver);
+}
+device_initcall(imx6_pcie_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-keystone.c b/marvell/linux/drivers/pci/controller/dwc/pci-keystone.c
new file mode 100644
index 0000000..b28d3c4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-keystone.c
@@ -0,0 +1,1485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PCIE_VENDORID_MASK	0xffff
+#define PCIE_DEVICEID_SHIFT	16
+
+/* Application registers */
+#define PID				0x000
+#define RTL				GENMASK(15, 11)
+#define RTL_SHIFT			11
+#define AM6_PCI_PG1_RTL_VER		0x15
+
+#define CMD_STATUS			0x004
+#define LTSSM_EN_VAL		        BIT(0)
+#define OB_XLAT_EN_VAL		        BIT(1)
+#define DBI_CS2				BIT(5)
+
+#define CFG_SETUP			0x008
+#define CFG_BUS(x)			(((x) & 0xff) << 16)
+#define CFG_DEVICE(x)			(((x) & 0x1f) << 8)
+#define CFG_FUNC(x)			((x) & 0x7)
+#define CFG_TYPE1			BIT(24)
+
+#define OB_SIZE				0x030
+#define OB_OFFSET_INDEX(n)		(0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n)			(0x204 + (8 * (n)))
+#define OB_ENABLEN			BIT(0)
+#define OB_WIN_SIZE			8	/* 8MB */
+
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n)	(0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)	(0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET			0x64
+#define PCIE_EP_IRQ_CLR			0x68
+#define INT_ENABLE			BIT(0)
+
+/* IRQ register defines */
+#define IRQ_EOI				0x050
+
+#define MSI_IRQ				0x054
+#define MSI_IRQ_STATUS(n)		(0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n)		(0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n)		(0x10c + ((n) << 4))
+#define MSI_IRQ_OFFSET			4
+
+#define IRQ_STATUS(n)			(0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n)		(0x188 + ((n) << 4))
+#define INTx_EN				BIT(0)
+
+#define ERR_IRQ_STATUS			0x1c4
+#define ERR_IRQ_ENABLE_SET		0x1c8
+#define ERR_AER				BIT(5)	/* ECRC error */
+#define AM6_ERR_AER			BIT(4)	/* AM6 ECRC error */
+#define ERR_AXI				BIT(4)	/* AXI tag lookup fatal error */
+#define ERR_CORR			BIT(3)	/* Correctable error */
+#define ERR_NONFATAL			BIT(2)	/* Non-fatal error */
+#define ERR_FATAL			BIT(1)	/* Fatal error */
+#define ERR_SYS				BIT(0)	/* System error */
+#define ERR_IRQ_ALL			(ERR_AER | ERR_AXI | ERR_CORR | \
+					 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK			0xb008
+#define PCIE_RC_K2E			0xb009
+#define PCIE_RC_K2L			0xb00a
+#define PCIE_RC_K2G			0xb00b
+
+#define KS_PCIE_DEV_TYPE_MASK		(0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode)		((mode) << 1)
+
+#define EP				0x0
+#define LEG_EP				0x1
+#define RC				0x2
+
+#define EXP_CAP_ID_OFFSET		0x70
+
+#define KS_PCIE_SYSCLOCKOUTEN		BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK	0x3
+#define AM654_WIN_SIZE			SZ_64K
+
+#define APP_ADDR_SPACE_0		(16 * SZ_1K)
+
+#define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
+
+#define PCI_DEVICE_ID_TI_AM654X		0xb00c
+
+struct ks_pcie_of_data {
+	enum dw_pcie_device_mode mode;
+	const struct dw_pcie_host_ops *host_ops;
+	const struct dw_pcie_ep_ops *ep_ops;
+	unsigned int version;
+};
+
+struct keystone_pcie {
+	struct dw_pcie		*pci;
+	/* PCI Device ID */
+	u32			device_id;
+	int			legacy_host_irqs[PCI_NUM_INTX];
+	struct			device_node *legacy_intc_np;
+
+	int			msi_host_irq;
+	int			num_lanes;
+	u32			num_viewport;
+	struct phy		**phy;
+	struct device_link	**link;
+	struct			device_node *msi_intc_np;
+	struct irq_domain	*legacy_irq_domain;
+	struct device_node	*np;
+
+	/* Application register space */
+	void __iomem		*va_app_base;	/* DT 1st resource */
+	struct resource		app;
+	bool			is_am6;
+};
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+	return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+			       u32 val)
+{
+	writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
+{
+	struct pcie_port *pp  = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	u32 irq = data->hwirq;
+	struct dw_pcie *pci;
+	u32 reg_offset;
+	u32 bit_pos;
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
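+	/* MSI vector n lives in MSI_IRQ status register (n % 8), bit (n / 8) */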
+	reg_offset = irq % 8;
+	bit_pos = irq >> 3;
+
+	ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+			   BIT(bit_pos));
+	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	struct dw_pcie *pci;
+	u64 msi_target;
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
+	msi_target = ks_pcie->app.start + MSI_IRQ;
+	msg->address_lo = lower_32_bits(msi_target);
+	msg->address_hi = upper_32_bits(msi_target);
+	msg->data = data->hwirq;
+
+	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
+				    const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static void ks_pcie_msi_mask(struct irq_data *data)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	u32 irq = data->hwirq;
+	struct dw_pcie *pci;
+	unsigned long flags;
+	u32 reg_offset;
+	u32 bit_pos;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
+	reg_offset = irq % 8;
+	bit_pos = irq >> 3;
+
+	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
+			   BIT(bit_pos));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void ks_pcie_msi_unmask(struct irq_data *data)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+	struct keystone_pcie *ks_pcie;
+	u32 irq = data->hwirq;
+	struct dw_pcie *pci;
+	unsigned long flags;
+	u32 reg_offset;
+	u32 bit_pos;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	pci = to_dw_pcie_from_pp(pp);
+	ks_pcie = to_keystone_pcie(pci);
+
+	reg_offset = irq % 8;
+	bit_pos = irq >> 3;
+
+	ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
+			   BIT(bit_pos));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip ks_pcie_msi_irq_chip = {
+	.name = "KEYSTONE-PCI-MSI",
+	.irq_ack = ks_pcie_msi_irq_ack,
+	.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+	.irq_set_affinity = ks_pcie_msi_set_affinity,
+	.irq_mask = ks_pcie_msi_mask,
+	.irq_unmask = ks_pcie_msi_unmask,
+};
+
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
+{
+	pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+	return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+				      int offset)
+{
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 pending;
+	int virq;
+
+	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
+
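+	/* Bit 0 of IRQ_STATUS(n) flags a pending INTx on line n */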
+	if (BIT(0) & pending) {
+		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+		dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
+		generic_handle_irq(virq);
+	}
+
+	/* EOI the INTx interrupt */
+	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+/*
+ * Dummy function so that DW core doesn't configure MSI
+ */
+static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
+{
+	return 0;
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+	u32 reg;
+	struct device *dev = ks_pcie->pci->dev;
+
+	reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+	if (!reg)
+		return IRQ_NONE;
+
+	if (reg & ERR_SYS)
+		dev_err(dev, "System Error\n");
+
+	if (reg & ERR_FATAL)
+		dev_err(dev, "Fatal Error\n");
+
+	if (reg & ERR_NONFATAL)
+		dev_dbg(dev, "Non Fatal Error\n");
+
+	if (reg & ERR_CORR)
+		dev_dbg(dev, "Correctable Error\n");
+
+	if (!ks_pcie->is_am6 && (reg & ERR_AXI))
+		dev_err(dev, "AXI tag lookup fatal Error\n");
+
+	if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
+		dev_err(dev, "ECRC Error\n");
+
+	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+	return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+	.name = "Keystone-PCI-Legacy-IRQ",
+	.irq_ack = ks_pcie_ack_legacy_irq,
+	.irq_mask = ks_pcie_mask_legacy_irq,
+	.irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+				       unsigned int irq,
+				       irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+	.map = ks_pcie_init_legacy_irq_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val |= DBI_CS2;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+	do {
+		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	} while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val &= ~DBI_CS2;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+	do {
+		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	} while (val & DBI_CS2);
+}
+
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+	u32 num_viewport = ks_pcie->num_viewport;
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	u64 start = pp->mem->start;
+	u64 end = pp->mem->end;
+	int i;
+
+	/* Disable BARs for inbound access */
+	ks_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+	ks_pcie_clear_dbi_mode(ks_pcie);
+
+	if (ks_pcie->is_am6)
+		return;
+
+	val = ilog2(OB_WIN_SIZE);
+	ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
+	for (i = 0; i < num_viewport && (start < end); i++) {
+		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+				   lower_32_bits(start) | OB_ENABLEN);
+		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+				   upper_32_bits(start));
+		start += OB_WIN_SIZE * SZ_1M;
+	}
+
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val |= OB_XLAT_EN_VAL;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 reg;
+
+	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+		CFG_FUNC(PCI_FUNC(devfn));
+	if (bus->parent->number != pp->root_bus_nr)
+		reg |= CFG_TYPE1;
+	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+	return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+}
+
+static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 reg;
+
+	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+		CFG_FUNC(PCI_FUNC(devfn));
+	if (bus->parent->number != pp->root_bus_nr)
+		reg |= CFG_TYPE1;
+	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+	return dw_pcie_write(pp->va_cfg0_base + where, size, val);
+}
+
+/**
+ * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for the MSI_IRQ register.
+ */
+static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+	/* Configure and set up BAR0 */
+	ks_pcie_set_dbi_mode(ks_pcie);
+
+	/* Enable BAR0 */
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+	ks_pcie_clear_dbi_mode(ks_pcie);
+
+	/*
+	 * For BAR0, just setting bus address for inbound writes (MSI) should
+	 * be sufficient.  Use physical address to avoid any conflicts.
+	 */
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+}
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+	val &= PORT_LOGIC_LTSSM_STATE_MASK;
+	return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 val;
+
+	/* Disable Link training */
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	val &= ~LTSSM_EN_VAL;
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_start_link(struct dw_pcie *pci)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 val;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_dbg(dev, "link is already up\n");
+		return 0;
+	}
+
+	/* Initiate Link Training */
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+
+	return 0;
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct keystone_pcie *ks_pcie;
+	struct device *bridge_dev;
+	struct pci_dev *bridge;
+	u32 val;
+
+	static const struct pci_device_id rc_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+	static const struct pci_device_id am6_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+
+	if (pci_is_root_bus(bus))
+		bridge = dev;
+
+	/* look for the host bridge */
+	while (!pci_is_root_bus(bus)) {
+		bridge = bus->self;
+		bus = bus->parent;
+	}
+
+	if (!bridge)
+		return;
+
+	/*
+	 * The Keystone PCI controller has a hardware limitation of a
+	 * 256-byte maximum read request size (MRRS); it cannot handle
+	 * anything larger. Enforce this limit on all downstream devices.
+	 */
+	if (pci_match_id(rc_pci_devids, bridge)) {
+		if (pcie_get_readrq(dev) > 256) {
+			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
+			pcie_set_readrq(dev, 256);
+		}
+	}
+
+	/*
+	 * Memory transactions fail with PCI controller in AM654 PG1.0
+	 * when MRRS is set to more than 128 bytes. Force the MRRS to
+	 * 128 bytes in all downstream devices.
+	 */
+	if (pci_match_id(am6_pci_devids, bridge)) {
+		bridge_dev = pci_get_host_bridge_device(dev);
+		if (!bridge_dev || !bridge_dev->parent)
+			return;
+
+		ks_pcie = dev_get_drvdata(bridge_dev->parent);
+		if (!ks_pcie)
+			return;
+
+		val = ks_pcie_app_readl(ks_pcie, PID);
+		val &= RTL;
+		val >>= RTL_SHIFT;
+		if (val != AM6_PCI_PG1_RTL_VER)
+			return;
+
+		if (pcie_get_readrq(dev) > 128) {
+			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+			pcie_set_readrq(dev, 128);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+	unsigned int irq = desc->irq_data.hwirq;
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	u32 offset = irq - ks_pcie->msi_host_irq;
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 vector, virq, reg, pos;
+
+	dev_dbg(dev, "%s, irq %d\n", __func__, irq);
+
+	/*
+	 * The chained IRQ handler installation replaces the normal flow
+	 * handler, so we need to take care of the mask/unmask and ack
+	 * operations ourselves.
+	 */
+	chained_irq_enter(chip, desc);
+
+	reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+	/*
+	 * MSI0 status bits 0-3 correspond to vectors 0, 8, 16 and 24; MSI1
+	 * status bits correspond to vectors 1, 9, 17 and 25; and so forth.
+	 */
+	for (pos = 0; pos < 4; pos++) {
+		if (!(reg & BIT(pos)))
+			continue;
+
+		vector = offset + (pos << 3);
+		virq = irq_linear_revmap(pp->irq_domain, vector);
+		dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector,
+			virq);
+		generic_handle_irq(virq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse the pending legacy interrupts and invoke the handler for each.
+ * Also takes care of the interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+
+	/*
+	 * The chained IRQ handler installation replaces the normal flow
+	 * handler, so we need to take care of the mask/unmask and ack
+	 * operations ourselves.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+	chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
+{
+	struct device *dev = ks_pcie->pci->dev;
+	struct device_node *np = ks_pcie->np;
+	struct device_node *intc_np;
+	struct irq_data *irq_data;
+	int irq_count, irq, ret, i;
+
+	if (!IS_ENABLED(CONFIG_PCI_MSI))
+		return 0;
+
+	intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+	if (!intc_np) {
+		if (ks_pcie->is_am6)
+			return 0;
+		dev_warn(dev, "msi-interrupt-controller node is absent\n");
+		return -EINVAL;
+	}
+
+	irq_count = of_irq_count(intc_np);
+	if (!irq_count) {
+		dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		irq = irq_of_parse_and_map(intc_np, i);
+		if (!irq) {
+			ret = -EINVAL;
+			goto err;
+		}
+
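+		/*
+		 * Record the first hwirq; the MSI handler uses it to compute
+		 * the MSI bank offset.
+		 */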
+		if (!ks_pcie->msi_host_irq) {
+			irq_data = irq_get_irq_data(irq);
+			if (!irq_data) {
+				ret = -EINVAL;
+				goto err;
+			}
+			ks_pcie->msi_host_irq = irq_data->hwirq;
+		}
+
+		irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+						 ks_pcie);
+	}
+
+	of_node_put(intc_np);
+	return 0;
+
+err:
+	of_node_put(intc_np);
+	return ret;
+}
+
+static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+	struct device *dev = ks_pcie->pci->dev;
+	struct irq_domain *legacy_irq_domain;
+	struct device_node *np = ks_pcie->np;
+	struct device_node *intc_np;
+	int irq_count, irq, ret = 0, i;
+
+	intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+	if (!intc_np) {
+		/*
+		 * Since legacy interrupts are modeled as edge interrupts
+		 * in AM6, keep them disabled for now.
+		 */
+		if (ks_pcie->is_am6)
+			return 0;
+		dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+		return -EINVAL;
+	}
+
+	irq_count = of_irq_count(intc_np);
+	if (!irq_count) {
+		dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		irq = irq_of_parse_and_map(intc_np, i);
+		if (!irq) {
+			ret = -EINVAL;
+			goto err;
+		}
+		ks_pcie->legacy_host_irqs[i] = irq;
+
+		irq_set_chained_handler_and_data(irq,
+						 ks_pcie_legacy_irq_handler,
+						 ks_pcie);
+	}
+
+	legacy_irq_domain =
+		irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+				      &ks_pcie_legacy_irq_domain_ops, NULL);
+	if (!legacy_irq_domain) {
+		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	ks_pcie->legacy_irq_domain = legacy_irq_domain;
+
+	for (i = 0; i < PCI_NUM_INTX; i++)
+		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+err:
+	of_node_put(intc_np);
+	return ret;
+}
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, the Keystone host
+ * gets a bus error instead of returning 0xffffffff. This handler always
+ * returns 0 for this kind of fault.
+ */
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+			 struct pt_regs *regs)
+{
+	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
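+	/*
+	 * If the aborted instruction looks like an ARM load of the LDRH/
+	 * LDRSB/LDRSH class, emulate it: write all-ones to the destination
+	 * register (bits 15:12) and step past the instruction.
+	 */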
+	if ((instr & 0x0e100090) == 0x00100090) {
+		int reg = (instr >> 12) & 15;
+
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+	}
+
+	return 0;
+}
+#endif
+
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+	int ret;
+	unsigned int id;
+	struct regmap *devctrl_regs;
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+	if (IS_ERR(devctrl_regs))
+		return PTR_ERR(devctrl_regs);
+
+	ret = regmap_read(devctrl_regs, 0, &id);
+	if (ret)
+		return ret;
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int __init ks_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	int ret;
+
+	ret = ks_pcie_config_legacy_irq(ks_pcie);
+	if (ret)
+		return ret;
+
+	ret = ks_pcie_config_msi_irq(ks_pcie);
+	if (ret)
+		return ret;
+
+	dw_pcie_setup_rc(pp);
+
+	ks_pcie_stop_link(pci);
+	ks_pcie_setup_rc_app_regs(ks_pcie);
+	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+			pci->dbi_base + PCI_IO_BASE);
+
+	ret = ks_pcie_init_id(ks_pcie);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_ARM
+	/*
+	 * PCIe access errors that result in OCP errors are caught by the
+	 * ARM core as "external aborts".
+	 */
+	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+			"Asynchronous external abort");
+#endif
+
+	ks_pcie_start_link(pci);
+	dw_pcie_wait_for_link(pci);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+	.rd_other_conf = ks_pcie_rd_other_conf,
+	.wr_other_conf = ks_pcie_wr_other_conf,
+	.host_init = ks_pcie_host_init,
+	.msi_host_init = ks_pcie_msi_host_init,
+	.scan_bus = ks_pcie_v3_65_scan_bus,
+};
+
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+	.host_init = ks_pcie_host_init,
+	.msi_host_init = ks_pcie_am654_msi_host_init,
+};
+
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
+{
+	struct keystone_pcie *ks_pcie = priv;
+
+	return ks_pcie_handle_error_irq(ks_pcie);
+}
+
+static int ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = ks_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pp->va_cfg0_base))
+		return PTR_ERR(pp->va_cfg0_base);
+
+	pp->va_cfg1_base = pp->va_cfg0_base;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
+				   u32 reg, size_t size)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+	u32 val;
+
+	ks_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_read(base + reg, size, &val);
+	ks_pcie_clear_dbi_mode(ks_pcie);
+	return val;
+}
+
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+				     u32 reg, size_t size, u32 val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+	ks_pcie_set_dbi_mode(ks_pcie);
+	dw_pcie_write(base + reg, size, val);
+	ks_pcie_clear_dbi_mode(ks_pcie);
+}
+
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+	.start_link = ks_pcie_start_link,
+	.stop_link = ks_pcie_stop_link,
+	.link_up = ks_pcie_link_up,
+	.read_dbi2 = ks_pcie_am654_read_dbi2,
+	.write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	int flags;
+
+	ep->page_size = AM654_WIN_SIZE;
+	flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
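+	/*
+	 * The dbi2 (shadow) write sets the BAR0 size mask; the dbi write
+	 * sets its flags.
+	 */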
+	dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
+
+static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+{
+	struct dw_pcie *pci = ks_pcie->pci;
+	u8 int_pin;
+
+	int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+	if (int_pin == 0 || int_pin > 4)
+		return;
+
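+	/* Pulse the legacy interrupt: assert it for ~1ms, then deassert and disable */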
+	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+			   INT_ENABLE);
+	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+	mdelay(1);
+	ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+	ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+			   INT_ENABLE);
+}
+
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				   enum pci_epc_irq_type type,
+				   u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		ks_pcie_am654_raise_legacy_irq(ks_pcie);
+		break;
+	case PCI_EPC_IRQ_MSI:
+		dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+		break;
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = false,
+	.reserved_bar = 1 << BAR_0 | 1 << BAR_1,
+	.bar_fixed_64bit = 1 << BAR_0,
+	.bar_fixed_size[2] = SZ_1M,
+	.bar_fixed_size[3] = SZ_64K,
+	.bar_fixed_size[4] = 256,
+	.bar_fixed_size[5] = SZ_1M,
+	.align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+	return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+	.ep_init = ks_pcie_am654_ep_init,
+	.raise_irq = ks_pcie_am654_raise_irq,
+	.get_features = &ks_pcie_am654_get_features,
+};
+
+static int ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = ks_pcie->pci;
+
+	ep = &pci->ep;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
+{
+	int num_lanes = ks_pcie->num_lanes;
+
+	while (num_lanes--) {
+		phy_power_off(ks_pcie->phy[num_lanes]);
+		phy_exit(ks_pcie->phy[num_lanes]);
+	}
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+	int i;
+	int ret;
+	int num_lanes = ks_pcie->num_lanes;
+
+	for (i = 0; i < num_lanes; i++) {
+		ret = phy_reset(ks_pcie->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_init(ks_pcie->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(ks_pcie->phy[i]);
+		if (ret < 0) {
+			phy_exit(ks_pcie->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(ks_pcie->phy[i]);
+		phy_exit(ks_pcie->phy[i]);
+	}
+
+	return ret;
+}
+
+static int ks_pcie_set_mode(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct regmap *syscon;
+	u32 val;
+	u32 mask;
+	int ret = 0;
+
+	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+	if (IS_ERR(syscon))
+		return 0;
+
+	mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+	val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+	ret = regmap_update_bits(syscon, 0, mask, val);
+	if (ret) {
+		dev_err(dev, "failed to set pcie mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+				  enum dw_pcie_device_mode mode)
+{
+	struct device_node *np = dev->of_node;
+	struct regmap *syscon;
+	u32 val;
+	u32 mask;
+	int ret = 0;
+
+	syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+	if (IS_ERR(syscon))
+		return 0;
+
+	mask = AM654_PCIE_DEV_TYPE_MASK;
+
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		val = RC;
+		break;
+	case DW_PCIE_EP_TYPE:
+		val = EP;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+		return -EINVAL;
+	}
+
+	ret = regmap_update_bits(syscon, 0, mask, val);
+	if (ret) {
+		dev_err(dev, "failed to set pcie mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
+{
+	u32 val;
+
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
+	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+		val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+		val |= link_speed;
+		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
+				   val);
+	}
+
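+	/*
+	 * The Target Link Speed field of LNKCTL2 occupies the same bits
+	 * (3:0) as PCI_EXP_LNKCAP_SLS, so the LNKCAP mask is reused here.
+	 */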
+	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
+	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
+		val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+		val |= link_speed;
+		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
+				   val);
+	}
+
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+	.host_ops = &ks_pcie_host_ops,
+	.version = 0x365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+	.host_ops = &ks_pcie_am654_host_ops,
+	.mode = DW_PCIE_RC_TYPE,
+	.version = 0x490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+	.ep_ops = &ks_pcie_am654_ep_ops,
+	.mode = DW_PCIE_EP_TYPE,
+	.version = 0x490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+	{
+		.type = "pci",
+		.data = &ks_pcie_rc_of_data,
+		.compatible = "ti,keystone-pcie",
+	},
+	{
+		.data = &ks_pcie_am654_rc_of_data,
+		.compatible = "ti,am654-pcie-rc",
+	},
+	{
+		.data = &ks_pcie_am654_ep_of_data,
+		.compatible = "ti,am654-pcie-ep",
+	},
+	{ },
+};
+
+static int ks_pcie_probe(struct platform_device *pdev)
+{
+	const struct dw_pcie_host_ops *host_ops;
+	const struct dw_pcie_ep_ops *ep_ops;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	const struct ks_pcie_of_data *data;
+	const struct of_device_id *match;
+	enum dw_pcie_device_mode mode;
+	struct dw_pcie *pci;
+	struct keystone_pcie *ks_pcie;
+	struct device_link **link;
+	struct gpio_desc *gpiod;
+	void __iomem *atu_base;
+	struct resource *res;
+	unsigned int version;
+	void __iomem *base;
+	u32 num_viewport;
+	struct phy **phy;
+	int link_speed;
+	u32 num_lanes;
+	char name[10];
+	int ret;
+	int irq;
+	int i;
+
+	match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
+	data = (struct ks_pcie_of_data *)match->data;
+	if (!data)
+		return -EINVAL;
+
+	version = data->version;
+	host_ops = data->host_ops;
+	ep_ops = data->ep_ops;
+	mode = data->mode;
+
+	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
+	if (!ks_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ks_pcie->va_app_base))
+		return PTR_ERR(ks_pcie->va_app_base);
+
+	ks_pcie->app = *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+	base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+		ks_pcie->is_am6 = true;
+
+	pci->dbi_base = base;
+	pci->dbi_base2 = base;
+	pci->dev = dev;
+	pci->ops = &ks_pcie_dw_pcie_ops;
+	pci->version = version;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "missing IRQ resource: %d\n", irq);
+		return irq;
+	}
+
+	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+			  "ks-pcie-error-irq", ks_pcie);
+	if (ret < 0) {
+		dev_err(dev, "failed to request error IRQ %d\n",
+			irq);
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+	if (ret)
+		num_lanes = 1;
+
+	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	for (i = 0; i < num_lanes; i++) {
+		snprintf(name, sizeof(name), "pcie-phy%d", i);
+		phy[i] = devm_phy_optional_get(dev, name);
+		if (IS_ERR(phy[i])) {
+			ret = PTR_ERR(phy[i]);
+			goto err_link;
+		}
+
+		if (!phy[i])
+			continue;
+
+		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+		if (!link[i]) {
+			ret = -EINVAL;
+			goto err_link;
+		}
+	}
+
+	ks_pcie->np = np;
+	ks_pcie->pci = pci;
+	ks_pcie->link = link;
+	ks_pcie->num_lanes = num_lanes;
+	ks_pcie->phy = phy;
+
+	gpiod = devm_gpiod_get_optional(dev, "reset",
+					GPIOD_OUT_LOW);
+	if (IS_ERR(gpiod)) {
+		ret = PTR_ERR(gpiod);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get reset GPIO\n");
+		goto err_link;
+	}
+
+	/* Obtain references to the PHYs */
+	for (i = 0; i < num_lanes; i++)
+		phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
+	ret = ks_pcie_enable_phy(ks_pcie);
+
+	/* Release references to the PHYs */
+	for (i = 0; i < num_lanes; i++)
+		phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		goto err_link;
+	}
+
+	platform_set_drvdata(pdev, ks_pcie);
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync failed\n");
+		goto err_get_sync;
+	}
+
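+	/* DWC cores from 4.80a onward expose the iATU registers in a separate "atu" space */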
+	if (pci->version >= 0x480A) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+		atu_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(atu_base)) {
+			ret = PTR_ERR(atu_base);
+			goto err_get_sync;
+		}
+
+		pci->atu_base = atu_base;
+
+		ret = ks_pcie_am654_set_mode(dev, mode);
+		if (ret < 0)
+			goto err_get_sync;
+	} else {
+		ret = ks_pcie_set_mode(dev);
+		if (ret < 0)
+			goto err_get_sync;
+	}
+
+	link_speed = of_pci_get_max_link_speed(np);
+	if (link_speed < 0)
+		link_speed = 2;
+
+	ks_pcie_set_link_speed(pci, link_speed);
+
+	switch (mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+			ret = -ENODEV;
+			goto err_get_sync;
+		}
+
+		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+		if (ret < 0) {
+			dev_err(dev, "unable to read *num-viewport* property\n");
+			goto err_get_sync;
+		}
+
+		/*
+		 * "Power Sequencing and Reset Signal Timings" table in
+		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+		 * indicates PERST# should be deasserted a minimum of 100 us
+		 * after REFCLK is stable. The REFCLK to the connector in RC
+		 * mode is selected while enabling the PHY. So deassert PERST#
+		 * after 100 us.
+		 */
+		if (gpiod) {
+			usleep_range(100, 200);
+			gpiod_set_value_cansleep(gpiod, 1);
+		}
+
+		ks_pcie->num_viewport = num_viewport;
+		pci->pp.ops = host_ops;
+		ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+		if (ret < 0)
+			goto err_get_sync;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+			ret = -ENODEV;
+			goto err_get_sync;
+		}
+
+		pci->ep.ops = ep_ops;
+		ret = ks_pcie_add_pcie_ep(ks_pcie, pdev);
+		if (ret < 0)
+			goto err_get_sync;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", mode);
+	}
+
+	ks_pcie_enable_error_irq(ks_pcie);
+
+	return 0;
+
+err_get_sync:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+	ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+	while (--i >= 0 && link[i])
+		device_link_del(link[i]);
+
+	return ret;
+}
+
+static int ks_pcie_remove(struct platform_device *pdev)
+{
+	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+	struct device_link **link = ks_pcie->link;
+	int num_lanes = ks_pcie->num_lanes;
+	struct device *dev = &pdev->dev;
+
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+	ks_pcie_disable_phy(ks_pcie);
+	while (num_lanes--)
+		device_link_del(link[num_lanes]);
+
+	return 0;
+}
+
+static struct platform_driver ks_pcie_driver = {
+	.probe  = ks_pcie_probe,
+	.remove = ks_pcie_remove,
+	.driver = {
+		.name	= "keystone-pcie",
+		.of_match_table = of_match_ptr(ks_pcie_of_match),
+	},
+};
+builtin_platform_driver(ks_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-layerscape-ep.c b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape-ep.c
new file mode 100644
index 0000000..ca9aa45
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2018 NXP Semiconductor.
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+#define PCIE_DBI2_OFFSET		0x1000	/* DBI2 base address */
+
+struct ls_pcie_ep {
+	struct dw_pcie		*pci;
+};
+
+#define to_ls_pcie_ep(x)	dev_get_drvdata((x)->dev)
+
+static int ls_pcie_establish_link(struct dw_pcie *pci)
+{
+	return 0;
+}
+
+static const struct dw_pcie_ops ls_pcie_ep_ops = {
+	.start_link = ls_pcie_establish_link,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+	{ .compatible = "fsl,ls-pcie-ep",},
+	{ },
+};
+
+static const struct pci_epc_features ls_pcie_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = false,
+	.bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+};
+
+static const struct pci_epc_features*
+ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+	return &ls_pcie_epc_features;
+}
+
+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				  enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	case PCI_EPC_IRQ_MSIX:
+		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+		return -EINVAL;
+	}
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = ls_pcie_ep_init,
+	.raise_irq = ls_pcie_ep_raise_irq,
+	.get_features = ls_pcie_ep_get_features,
+};
+
+static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
+					struct platform_device *pdev)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	int ret;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct ls_pcie_ep *pcie;
+	struct resource *dbi_base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
+	pci->dev = dev;
+	pci->ops = &ls_pcie_ep_ops;
+	pcie->pci = pci;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = ls_add_pcie_ep(pcie, pdev);
+
+	return ret;
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+	.driver = {
+		.name = "layerscape-pcie-ep",
+		.of_match_table = ls_pcie_ep_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-layerscape.c b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape.c
new file mode 100644
index 0000000..3a5fa26
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-layerscape.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2014 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+/* PEX1/2 Misc Ports Status Register */
+#define SCFG_PEXMSCPORTSR(pex_idx)	(0x94 + (pex_idx) * 4)
+#define LTSSM_STATE_SHIFT	20
+#define LTSSM_STATE_MASK	0x3f
+#define LTSSM_PCIE_L0		0x11 /* L0 state */
+
+/* PEX Internal Configuration Registers */
+#define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR		0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING	0x9401 /* Forward error of non-posted request */
+
+#define PCIE_IATU_NUM		6
+
+struct ls_pcie_drvdata {
+	u32 lut_offset;
+	u32 ltssm_shift;
+	u32 lut_dbg;
+	const struct dw_pcie_host_ops *ops;
+	const struct dw_pcie_ops *dw_pcie_ops;
+};
+
+struct ls_pcie {
+	struct dw_pcie *pci;
+	void __iomem *lut;
+	struct regmap *scfg;
+	const struct ls_pcie_drvdata *drvdata;
+	int index;
+};
+
+#define to_ls_pcie(x)	dev_get_drvdata((x)->dev)
+
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	u32 header_type;
+
+	header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
+	header_type &= 0x7f;
+
+	return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+/* Clear multi-function bit */
+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+
+	iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE);
+}
+
+/* Drop MSG TLP except for Vendor MSG */
+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
+{
+	u32 val;
+	struct dw_pcie *pci = pcie->pci;
+
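+	/*
+	 * Clearing bit 29 (the 0xDFFFFFFF mask below) makes the core drop
+	 * inbound MSG TLPs other than Vendor MSGs.
+	 */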
+	val = ioread32(pci->dbi_base + PCIE_STRFMR1);
+	val &= 0xDFFFFFFF;
+	iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
+}
+
+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
+{
+	int i;
+
+	for (i = 0; i < PCIE_IATU_NUM; i++)
+		dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
+}
+
+static int ls1021_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 state;
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+
+	if (!pcie->scfg)
+		return 0;
+
+	regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
+	state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
+
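+	/* Any LTSSM state below L0 (0x11) means the link is not up yet */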
+	if (state < LTSSM_PCIE_L0)
+		return 0;
+
+	return 1;
+}
+
+static int ls_pcie_link_up(struct dw_pcie *pci)
+{
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+	u32 state;
+
+	state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
+		 pcie->drvdata->ltssm_shift) &
+		 LTSSM_STATE_MASK;
+
+	if (state < LTSSM_PCIE_L0)
+		return 0;
+
+	return 1;
+}
+
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+
+	iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
+static int ls_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+
+	/*
+	 * Disable outbound windows configured by the bootloader to avoid
+	 * one transaction hitting multiple outbound windows.
+	 * dw_pcie_setup_rc() will reconfigure the outbound windows.
+	 */
+	ls_pcie_disable_outbound_atus(pcie);
+	ls_pcie_fix_error_response(pcie);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	ls_pcie_clear_multifunction(pcie);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	ls_pcie_drop_msg_tlp(pcie);
+
+	dw_pcie_setup_rc(pp);
+
+	return 0;
+}
+
+static int ls1021_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct ls_pcie *pcie = to_ls_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 index[2];
+	int ret;
+
+	pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
+						     "fsl,pcie-scfg");
+	if (IS_ERR(pcie->scfg)) {
+		ret = PTR_ERR(pcie->scfg);
+		dev_err(dev, "No syscfg phandle specified\n");
+		pcie->scfg = NULL;
+		return ret;
+	}
+
+	if (of_property_read_u32_array(dev->of_node,
+				       "fsl,pcie-scfg", index, 2)) {
+		pcie->scfg = NULL;
+		return -EINVAL;
+	}
+	pcie->index = index[1];
+
+	return ls_pcie_host_init(pp);
+}
+
+static int ls_pcie_msi_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *msi_node;
+
+	/*
+	 * The MSI domain is set by the generic of_msi_configure().  This
+	 * .msi_host_init() function keeps us from doing the default MSI
+	 * domain setup in dw_pcie_host_init() and also enforces the
+	 * requirement that "msi-parent" exists.
+	 */
+	msi_node = of_parse_phandle(np, "msi-parent", 0);
+	if (!msi_node) {
+		dev_err(dev, "failed to find msi-parent\n");
+		return -EINVAL;
+	}
+
+	of_node_put(msi_node);
+	return 0;
+}
+
+static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
+	.host_init = ls1021_pcie_host_init,
+	.msi_host_init = ls_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
+	.host_init = ls_pcie_host_init,
+	.msi_host_init = ls_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
+	.link_up = ls1021_pcie_link_up,
+};
+
+static const struct dw_pcie_ops dw_ls_pcie_ops = {
+	.link_up = ls_pcie_link_up,
+};
+
+static const struct ls_pcie_drvdata ls1021_drvdata = {
+	.ops = &ls1021_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls1021_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls1043_drvdata = {
+	.lut_offset = 0x10000,
+	.ltssm_shift = 24,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls1046_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 24,
+	.lut_dbg = 0x407fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls2080_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 0,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct ls_pcie_drvdata ls2088_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 0,
+	.lut_dbg = 0x407fc,
+	.ops = &ls_pcie_host_ops,
+	.dw_pcie_ops = &dw_ls_pcie_ops,
+};
+
+static const struct of_device_id ls_pcie_of_match[] = {
+	{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
+	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
+	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
+	{ .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
+	{ },
+};
+
+static int __init ls_add_pcie_port(struct ls_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	int ret;
+
+	pp->ops = pcie->drvdata->ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init ls_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct ls_pcie *pcie;
+	struct resource *dbi_base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pcie->drvdata = of_device_get_match_data(dev);
+
+	pci->dev = dev;
+	pci->ops = pcie->drvdata->dw_pcie_ops;
+
+	pcie->pci = pci;
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
+
+	if (!ls_pcie_is_bridge(pcie))
+		return -ENODEV;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = ls_add_pcie_port(pcie);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct platform_driver ls_pcie_driver = {
+	.driver = {
+		.name = "layerscape-pcie",
+		.of_match_table = ls_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pci-meson.c b/marvell/linux/drivers/pci/controller/dwc/pci-meson.c
new file mode 100644
index 0000000..8c9f887
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pci-meson.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amlogic MESON SoCs
+ *
+ * Copyright (c) 2018 Amlogic, inc.
+ * Author: Yue Wang <yue.wang@amlogic.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
+
+/* External local bus interface registers */
+#define PLR_OFFSET			0x700
+#define PCIE_PORT_LINK_CTRL_OFF		(PLR_OFFSET + 0x10)
+#define FAST_LINK_MODE			BIT(7)
+#define LINK_CAPABLE_MASK		GENMASK(21, 16)
+#define LINK_CAPABLE_X1			BIT(16)
+
+#define PCIE_GEN2_CTRL_OFF		(PLR_OFFSET + 0x10c)
+#define NUM_OF_LANES_MASK		GENMASK(12, 8)
+#define NUM_OF_LANES_X1			BIT(8)
+#define DIRECT_SPEED_CHANGE		BIT(17)
+
+#define TYPE1_HDR_OFFSET		0x0
+#define PCIE_STATUS_COMMAND		(TYPE1_HDR_OFFSET + 0x04)
+#define PCI_IO_EN			BIT(0)
+#define PCI_MEM_SPACE_EN		BIT(1)
+#define PCI_BUS_MASTER_EN		BIT(2)
+
+#define PCIE_BASE_ADDR0			(TYPE1_HDR_OFFSET + 0x10)
+#define PCIE_BASE_ADDR1			(TYPE1_HDR_OFFSET + 0x14)
+
+#define PCIE_CAP_OFFSET			0x70
+#define PCIE_DEV_CTRL_DEV_STUS		(PCIE_CAP_OFFSET + 0x08)
+#define PCIE_CAP_MAX_PAYLOAD_MASK	GENMASK(7, 5)
+#define PCIE_CAP_MAX_PAYLOAD_SIZE(x)	((x) << 5)
+#define PCIE_CAP_MAX_READ_REQ_MASK	GENMASK(14, 12)
+#define PCIE_CAP_MAX_READ_REQ_SIZE(x)	((x) << 12)
+
+/* PCIe specific config registers */
+#define PCIE_CFG0			0x0
+#define APP_LTSSM_ENABLE		BIT(7)
+
+#define PCIE_CFG_STATUS12		0x30
+#define IS_SMLH_LINK_UP(x)		((x) & (1 << 6))
+#define IS_RDLH_LINK_UP(x)		((x) & (1 << 16))
+#define IS_LTSSM_UP(x)			((((x) >> 10) & 0x1f) == 0x11)
+
+#define PCIE_CFG_STATUS17		0x44
+#define PM_CURRENT_STATE(x)		(((x) >> 7) & 0x1)
+
+#define WAIT_LINKUP_TIMEOUT		4000
+#define PORT_CLK_RATE			100000000UL
+#define MAX_PAYLOAD_SIZE		256
+#define MAX_READ_REQ_SIZE		256
+#define MESON_PCIE_PHY_POWERUP		0x1c
+#define PCIE_RESET_DELAY		500
+#define PCIE_SHARED_RESET		1
+#define PCIE_NORMAL_RESET		0
+
+enum pcie_data_rate {
+	PCIE_GEN1,
+	PCIE_GEN2,
+	PCIE_GEN3,
+	PCIE_GEN4
+};
+
+struct meson_pcie_mem_res {
+	void __iomem *elbi_base;
+	void __iomem *cfg_base;
+	void __iomem *phy_base;
+};
+
+struct meson_pcie_clk_res {
+	struct clk *clk;
+	struct clk *mipi_gate;
+	struct clk *port_clk;
+	struct clk *general_clk;
+};
+
+struct meson_pcie_rc_reset {
+	struct reset_control *phy;
+	struct reset_control *port;
+	struct reset_control *apb;
+};
+
+struct meson_pcie {
+	struct dw_pcie pci;
+	struct meson_pcie_mem_res mem_res;
+	struct meson_pcie_clk_res clk_res;
+	struct meson_pcie_rc_reset mrst;
+	struct gpio_desc *reset_gpio;
+};
+
+static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
+						  const char *id,
+						  u32 reset_type)
+{
+	struct device *dev = mp->pci.dev;
+	struct reset_control *reset;
+
+	if (reset_type == PCIE_SHARED_RESET)
+		reset = devm_reset_control_get_shared(dev, id);
+	else
+		reset = devm_reset_control_get(dev, id);
+
+	return reset;
+}
+
+static int meson_pcie_get_resets(struct meson_pcie *mp)
+{
+	struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+	mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+	if (IS_ERR(mrst->phy))
+		return PTR_ERR(mrst->phy);
+	reset_control_deassert(mrst->phy);
+
+	mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
+	if (IS_ERR(mrst->port))
+		return PTR_ERR(mrst->port);
+	reset_control_deassert(mrst->port);
+
+	mrst->apb = meson_pcie_get_reset(mp, "apb", PCIE_SHARED_RESET);
+	if (IS_ERR(mrst->apb))
+		return PTR_ERR(mrst->apb);
+	reset_control_deassert(mrst->apb);
+
+	return 0;
+}
+
+static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
+					struct meson_pcie *mp,
+					const char *id)
+{
+	struct device *dev = mp->pci.dev;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+
+	return devm_ioremap_resource(dev, res);
+}
+
+static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
+					       struct meson_pcie *mp,
+					       const char *id)
+{
+	struct device *dev = mp->pci.dev;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
+	if (!res) {
+		dev_err(dev, "No REG resource %s\n", id);
+		return ERR_PTR(-ENXIO);
+	}
+
+	return devm_ioremap(dev, res->start, resource_size(res));
+}
+
+static int meson_pcie_get_mems(struct platform_device *pdev,
+			       struct meson_pcie *mp)
+{
+	mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
+	if (IS_ERR(mp->mem_res.elbi_base))
+		return PTR_ERR(mp->mem_res.elbi_base);
+
+	mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
+	if (IS_ERR(mp->mem_res.cfg_base))
+		return PTR_ERR(mp->mem_res.cfg_base);
+
+	/* The two PCIe controllers on the Meson SoC share the same PHY register space */
+	mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
+	if (IS_ERR(mp->mem_res.phy_base))
+		return PTR_ERR(mp->mem_res.phy_base);
+
+	return 0;
+}
+
+static void meson_pcie_power_on(struct meson_pcie *mp)
+{
+	writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+}
+
+static void meson_pcie_reset(struct meson_pcie *mp)
+{
+	struct meson_pcie_rc_reset *mrst = &mp->mrst;
+
+	reset_control_assert(mrst->phy);
+	udelay(PCIE_RESET_DELAY);
+	reset_control_deassert(mrst->phy);
+	udelay(PCIE_RESET_DELAY);
+
+	reset_control_assert(mrst->port);
+	reset_control_assert(mrst->apb);
+	udelay(PCIE_RESET_DELAY);
+	reset_control_deassert(mrst->port);
+	reset_control_deassert(mrst->apb);
+	udelay(PCIE_RESET_DELAY);
+}
+
+static inline struct clk *meson_pcie_probe_clock(struct device *dev,
+						 const char *id, u64 rate)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = devm_clk_get(dev, id);
+	if (IS_ERR(clk))
+		return clk;
+
+	if (rate) {
+		ret = clk_set_rate(clk, rate);
+		if (ret) {
+			dev_err(dev, "set clk rate failed, ret = %d\n", ret);
+			return ERR_PTR(ret);
+		}
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "couldn't enable clk\n");
+		return ERR_PTR(ret);
+	}
+
+	devm_add_action_or_reset(dev,
+				 (void (*) (void *))clk_disable_unprepare,
+				 clk);
+
+	return clk;
+}
+
+static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+{
+	struct device *dev = mp->pci.dev;
+	struct meson_pcie_clk_res *res = &mp->clk_res;
+
+	res->port_clk = meson_pcie_probe_clock(dev, "port", PORT_CLK_RATE);
+	if (IS_ERR(res->port_clk))
+		return PTR_ERR(res->port_clk);
+
+	res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+	if (IS_ERR(res->mipi_gate))
+		return PTR_ERR(res->mipi_gate);
+
+	res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
+	if (IS_ERR(res->general_clk))
+		return PTR_ERR(res->general_clk);
+
+	res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
+	if (IS_ERR(res->clk))
+		return PTR_ERR(res->clk);
+
+	return 0;
+}
+
+static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+	writel(val, mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
+{
+	return readl(mp->mem_res.elbi_base + reg);
+}
+
+static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
+{
+	return readl(mp->mem_res.cfg_base + reg);
+}
+
+static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
+{
+	writel(val, mp->mem_res.cfg_base + reg);
+}
+
+static void meson_pcie_assert_reset(struct meson_pcie *mp)
+{
+	gpiod_set_value_cansleep(mp->reset_gpio, 0);
+	udelay(500);
+	gpiod_set_value_cansleep(mp->reset_gpio, 1);
+}
+
+static void meson_pcie_init_dw(struct meson_pcie *mp)
+{
+	u32 val;
+
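+	/*
+	 * Enable the app LTSSM, force a x1 link with direct speed change,
+	 * and clear the RC BARs.
+	 */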
+	val = meson_cfg_readl(mp, PCIE_CFG0);
+	val |= APP_LTSSM_ENABLE;
+	meson_cfg_writel(mp, val, PCIE_CFG0);
+
+	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+	val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
+	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
+	val |= LINK_CAPABLE_X1;
+	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
+
+	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+	val &= ~NUM_OF_LANES_MASK;
+	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
+	val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
+	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
+
+	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
+	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
+}
+
+static int meson_size_to_payload(struct meson_pcie *mp, int size)
+{
+	struct device *dev = mp->pci.dev;
+
+	/*
+	 * The DWC core supports payload sizes of 2^(val + 7) bytes, where
+	 * val ranges from 0 to 5 and defaults to 1. If the requested size
+	 * is not a power of two, or falls outside [2^7, 2^12], fall back
+	 * to the default of 2^(1 + 7) = 256 bytes
+	 * (e.g. 256 -> fls(256) - 8 = 1).
+	 */
+	if (!is_power_of_2(size) || size < 128 || size > 4096) {
+		dev_warn(dev, "payload size %d, set to default 256\n", size);
+		return 1;
+	}
+
+	return fls(size) - 8;
+}
+
+static void meson_set_max_payload(struct meson_pcie *mp, int size)
+{
+	u32 val;
+	int max_payload_size = meson_size_to_payload(mp, size);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
+{
+	u32 val;
+	int max_rd_req_size = meson_size_to_payload(mp, size);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+
+	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+	val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
+	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+}
+
+static inline void meson_enable_memory_space(struct meson_pcie *mp)
+{
+	/* Set the RC Bus Master, Memory Space and I/O Space enables */
+	meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
+			 PCIE_STATUS_COMMAND);
+}
+
+static int meson_pcie_establish_link(struct meson_pcie *mp)
+{
+	struct dw_pcie *pci = &mp->pci;
+	struct pcie_port *pp = &pci->pp;
+
+	meson_pcie_init_dw(mp);
+	meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+	meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
+
+	dw_pcie_setup_rc(pp);
+	meson_enable_memory_space(mp);
+
+	meson_pcie_assert_reset(mp);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
+{
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(&mp->pci.pp);
+}
+
+static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+				  u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
+
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
+
+	/*
+	 * There is a bug in the MESON AXG PCIe controller whereby software
+	 * cannot program the PCI_CLASS_DEVICE register, so we must fabricate
+	 * the return value in the config accessors.
+	 */
+	if (where == PCI_CLASS_REVISION && size == 4)
+		*val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
+	else if (where == PCI_CLASS_DEVICE && size == 2)
+		*val = PCI_CLASS_BRIDGE_PCI;
+	else if (where == PCI_CLASS_DEVICE && size == 1)
+		*val = PCI_CLASS_BRIDGE_PCI & 0xff;
+	else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
+		*val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
+				  int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+static int meson_pcie_link_up(struct dw_pcie *pci)
+{
+	struct meson_pcie *mp = to_meson_pcie(pci);
+	struct device *dev = pci->dev;
+	u32 speed_okay = 0;
+	u32 cnt = 0;
+	u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
+
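+	/* Poll the link status every 10 us, up to WAIT_LINKUP_TIMEOUT iterations (~40 ms) */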
+	do {
+		state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
+		state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
+		smlh_up = IS_SMLH_LINK_UP(state12);
+		rdlh_up = IS_RDLH_LINK_UP(state12);
+		ltssm_up = IS_LTSSM_UP(state12);
+
+		if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
+			speed_okay = 1;
+
+		if (smlh_up)
+			dev_dbg(dev, "smlh_link_up is on\n");
+		if (rdlh_up)
+			dev_dbg(dev, "rdlh_link_up is on\n");
+		if (ltssm_up)
+			dev_dbg(dev, "ltssm_up is on\n");
+		if (speed_okay)
+			dev_dbg(dev, "speed_okay\n");
+
+		if (smlh_up && rdlh_up && ltssm_up && speed_okay)
+			return 1;
+
+		cnt++;
+
+		udelay(10);
+	} while (cnt < WAIT_LINKUP_TIMEOUT);
+
+	dev_err(dev, "error: wait linkup timeout\n");
+	return 0;
+}
+
+static int meson_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct meson_pcie *mp = to_meson_pcie(pci);
+	int ret;
+
+	ret = meson_pcie_establish_link(mp);
+	if (ret)
+		return ret;
+
+	meson_pcie_enable_interrupts(mp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops meson_pcie_host_ops = {
+	.rd_own_conf = meson_pcie_rd_own_conf,
+	.wr_own_conf = meson_pcie_wr_own_conf,
+	.host_init = meson_pcie_host_init,
+};
+
+static int meson_add_pcie_port(struct meson_pcie *mp,
+			       struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &mp->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "failed to get MSI IRQ\n");
+			return pp->msi_irq;
+		}
+	}
+
+	pp->ops = &meson_pcie_host_ops;
+	pci->dbi_base = mp->mem_res.elbi_base;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = meson_pcie_link_up,
+};
+
+static int meson_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct meson_pcie *mp;
+	int ret;
+
+	mp = devm_kzalloc(dev, sizeof(*mp), GFP_KERNEL);
+	if (!mp)
+		return -ENOMEM;
+
+	pci = &mp->pci;
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(mp->reset_gpio)) {
+		dev_err(dev, "get reset gpio failed\n");
+		return PTR_ERR(mp->reset_gpio);
+	}
+
+	ret = meson_pcie_get_resets(mp);
+	if (ret) {
+		dev_err(dev, "get reset resource failed, %d\n", ret);
+		return ret;
+	}
+
+	ret = meson_pcie_get_mems(pdev, mp);
+	if (ret) {
+		dev_err(dev, "get memory resource failed, %d\n", ret);
+		return ret;
+	}
+
+	meson_pcie_power_on(mp);
+	meson_pcie_reset(mp);
+
+	ret = meson_pcie_probe_clocks(mp);
+	if (ret) {
+		dev_err(dev, "init clock resources failed, %d\n", ret);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, mp);
+
+	ret = meson_add_pcie_port(mp, pdev);
+	if (ret < 0) {
+		dev_err(dev, "Add PCIe port failed, %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id meson_pcie_of_match[] = {
+	{
+		.compatible = "amlogic,axg-pcie",
+	},
+	{},
+};
+
+static struct platform_driver meson_pcie_driver = {
+	.probe = meson_pcie_probe,
+	.driver = {
+		.name = "meson-pcie",
+		.of_match_table = meson_pcie_of_match,
+	},
+};
+
+builtin_platform_driver(meson_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-al.c b/marvell/linux/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 0000000..1eeda2f
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi  {
+	void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				     int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct al_pcie_acpi *pcie = cfg->priv;
+	void __iomem *dbi_base = pcie->dbi_base;
+
+	if (bus->number == cfg->busr.start) {
+		/*
+		 * The DW PCIe core doesn't filter out transactions to other
+		 * devices/functions on the root bus num, so we do this here.
+		 */
+		if (PCI_SLOT(devfn) > 0)
+			return NULL;
+		else
+			return dbi_base + where;
+	}
+
+	return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct al_pcie_acpi *al_pcie;
+	struct resource *res;
+	int ret;
+
+	al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+	if (!al_pcie)
+		return -ENOMEM;
+
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+	if (ret) {
+		dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+			root->segment);
+		return ret;
+	}
+
+	dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+	al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(al_pcie->dbi_base)) {
+		long err = PTR_ERR(al_pcie->dbi_base);
+
+		dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n",
+			res, err);
+		return err;
+	}
+
+	cfg->priv = al_pcie;
+
+	return 0;
+}
+
+struct pci_ecam_ops al_pcie_ops = {
+	.bus_shift    = 20,
+	.init         =  al_pcie_init,
+	.pci_ops      = {
+		.map_bus    = al_pcie_map_bus,
+		.read       = pci_generic_config_read,
+		.write      = pci_generic_config_write,
+	}
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_AL
+
+#include <linux/of_pci.h>
+#include "pcie-designware.h"
+
+#define AL_PCIE_REV_ID_2	2
+#define AL_PCIE_REV_ID_3	3
+#define AL_PCIE_REV_ID_4	4
+
+#define AXI_BASE_OFFSET		0x0
+
+#define DEVICE_ID_OFFSET	0x16c
+
+#define DEVICE_REV_ID			0x0
+#define DEVICE_REV_ID_DEV_ID_MASK	GENMASK(31, 16)
+
+#define DEVICE_REV_ID_DEV_ID_X4		0
+#define DEVICE_REV_ID_DEV_ID_X8		2
+#define DEVICE_REV_ID_DEV_ID_X16	4
+
+#define OB_CTRL_REV1_2_OFFSET	0x0040
+#define OB_CTRL_REV3_5_OFFSET	0x0030
+
+#define CFG_TARGET_BUS			0x0
+#define CFG_TARGET_BUS_MASK_MASK	GENMASK(7, 0)
+#define CFG_TARGET_BUS_BUSNUM_MASK	GENMASK(15, 8)
+
+#define CFG_CONTROL			0x4
+#define CFG_CONTROL_SUBBUS_MASK		GENMASK(15, 8)
+#define CFG_CONTROL_SEC_BUS_MASK	GENMASK(23, 16)
+
+struct al_pcie_reg_offsets {
+	unsigned int ob_ctrl;
+};
+
+struct al_pcie_target_bus_cfg {
+	u8 reg_val;
+	u8 reg_mask;
+	u8 ecam_mask;
+};
+
+struct al_pcie {
+	struct dw_pcie *pci;
+	void __iomem *controller_base; /* base of PCIe unit (not DW core) */
+	struct device *dev;
+	resource_size_t ecam_size;
+	unsigned int controller_rev_id;
+	struct al_pcie_reg_offsets reg_offsets;
+	struct al_pcie_target_bus_cfg target_bus_cfg;
+};
+
+#define PCIE_ECAM_DEVFN(x)		(((x) & 0xff) << 12)
+
+#define to_al_pcie(x)		dev_get_drvdata((x)->dev)
+
+static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
+{
+	return readl_relaxed(pcie->controller_base + offset);
+}
+
+static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
+					     u32 val)
+{
+	writel_relaxed(val, pcie->controller_base + offset);
+}
+
+static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
+{
+	u32 dev_rev_id_val;
+	u32 dev_id_val;
+
+	dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
+						  DEVICE_ID_OFFSET +
+						  DEVICE_REV_ID);
+	dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);
+
+	switch (dev_id_val) {
+	case DEVICE_REV_ID_DEV_ID_X4:
+		*rev_id = AL_PCIE_REV_ID_2;
+		break;
+	case DEVICE_REV_ID_DEV_ID_X8:
+		*rev_id = AL_PCIE_REV_ID_3;
+		break;
+	case DEVICE_REV_ID_DEV_ID_X16:
+		*rev_id = AL_PCIE_REV_ID_4;
+		break;
+	default:
+		dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
+			dev_id_val);
+		return -EINVAL;
+	}
+
+	dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);
+
+	return 0;
+}
+
+static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
+{
+	switch (pcie->controller_rev_id) {
+	case AL_PCIE_REV_ID_2:
+		pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
+		break;
+	case AL_PCIE_REV_ID_3:
+	case AL_PCIE_REV_ID_4:
+		pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
+		break;
+	default:
+		dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
+			pcie->controller_rev_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
+					  u8 target_bus,
+					  u8 mask_target_bus)
+{
+	u32 reg;
+
+	reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
+	      FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);
+
+	al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
+				  pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
+				  reg);
+}
+
+static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
+					   unsigned int busnr,
+					   unsigned int devfn)
+{
+	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
+	unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
+	unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
+	struct pcie_port *pp = &pcie->pci->pp;
+	void __iomem *pci_base_addr;
+
+	pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
+					 (busnr_ecam << 20) +
+					 PCIE_ECAM_DEVFN(devfn));
+
+	if (busnr_reg != target_bus_cfg->reg_val) {
+		dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
+			target_bus_cfg->reg_val, busnr_reg);
+		target_bus_cfg->reg_val = busnr_reg;
+		al_pcie_target_bus_set(pcie,
+				       target_bus_cfg->reg_val,
+				       target_bus_cfg->reg_mask);
+	}
+
+	return pci_base_addr;
+}
+
+static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct al_pcie *pcie = to_al_pcie(pci);
+	unsigned int busnr = bus->number;
+	void __iomem *pci_addr;
+	int rc;
+
+	pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
+
+	rc = dw_pcie_read(pci_addr + where, size, val);
+
+	dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
+		size, pci_domain_nr(bus), bus->number,
+		PCI_SLOT(devfn), PCI_FUNC(devfn), where,
+		(pci_addr + where), *val);
+
+	return rc;
+}
+
+static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 unsigned int devfn, int where, int size,
+				 u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct al_pcie *pcie = to_al_pcie(pci);
+	unsigned int busnr = bus->number;
+	void __iomem *pci_addr;
+	int rc;
+
+	pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
+
+	rc = dw_pcie_write(pci_addr + where, size, val);
+
+	dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
+		size, pci_domain_nr(bus), bus->number,
+		PCI_SLOT(devfn), PCI_FUNC(devfn), where,
+		(pci_addr + where), val);
+
+	return rc;
+}
+
+static void al_pcie_config_prepare(struct al_pcie *pcie)
+{
+	struct al_pcie_target_bus_cfg *target_bus_cfg;
+	struct pcie_port *pp = &pcie->pci->pp;
+	unsigned int ecam_bus_mask;
+	u32 cfg_control_offset;
+	u8 subordinate_bus;
+	u8 secondary_bus;
+	u32 cfg_control;
+	u32 reg;
+
+	target_bus_cfg = &pcie->target_bus_cfg;
+
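+	/*
+	 * ECAM maps 1MB of config space per bus, so the window size in MB
+	 * gives the number of buses addressable through it.
+	 */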
+	ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
+	if (ecam_bus_mask > 255) {
+		dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256MB\n");
+		ecam_bus_mask = 255;
+	}
+
+	/* This portion is taken from the transaction address */
+	target_bus_cfg->ecam_mask = ecam_bus_mask;
+	/* This portion is taken from the cfg_target_bus reg */
+	target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
+	target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
+
+	al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
+			       target_bus_cfg->reg_mask);
+
+	secondary_bus = pp->busn->start + 1;
+	subordinate_bus = pp->busn->end;
+
+	/* Set the valid values of secondary and subordinate buses */
+	cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
+			     CFG_CONTROL;
+
+	cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);
+
+	reg = cfg_control &
+	      ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);
+
+	reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
+	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+
+	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+}
+
+static int al_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct al_pcie *pcie = to_al_pcie(pci);
+	int rc;
+
+	rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
+	if (rc)
+		return rc;
+
+	rc = al_pcie_reg_offsets_set(pcie);
+	if (rc)
+		return rc;
+
+	al_pcie_config_prepare(pcie);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops al_pcie_host_ops = {
+	.rd_other_conf = al_pcie_rd_other_conf,
+	.wr_other_conf = al_pcie_wr_other_conf,
+	.host_init = al_pcie_host_init,
+};
+
+static int al_add_pcie_port(struct pcie_port *pp,
+			    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &al_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
+static int al_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *controller_res;
+	struct resource *ecam_res;
+	struct resource *dbi_res;
+	struct al_pcie *al_pcie;
+	struct dw_pcie *pci;
+
+	al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+	if (!al_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	al_pcie->pci = pci;
+	al_pcie->dev = dev;
+
+	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res);
+		return PTR_ERR(pci->dbi_base);
+	}
+
+	ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	if (!ecam_res) {
+		dev_err(dev, "couldn't find 'config' reg in DT\n");
+		return -ENOENT;
+	}
+	al_pcie->ecam_size = resource_size(ecam_res);
+
+	controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      "controller");
+	al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
+	if (IS_ERR(al_pcie->controller_base)) {
+		dev_err(dev, "couldn't remap controller base %pR\n",
+			controller_res);
+		return PTR_ERR(al_pcie->controller_base);
+	}
+
+	dev_dbg(dev, "From DT: dbi_base: %pR, controller_base: %pR\n",
+		dbi_res, controller_res);
+
+	platform_set_drvdata(pdev, al_pcie);
+
+	return al_add_pcie_port(&pci->pp, pdev);
+}
+
+static const struct of_device_id al_pcie_of_match[] = {
+	{ .compatible = "amazon,al-alpine-v2-pcie",
+	},
+	{ .compatible = "amazon,al-alpine-v3-pcie",
+	},
+	{},
+};
+
+static struct platform_driver al_pcie_driver = {
+	.driver = {
+		.name	= "al-pcie",
+		.of_match_table = al_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = al_pcie_probe,
+};
+builtin_platform_driver(al_pcie_driver);
+
+#endif /* CONFIG_PCIE_AL */
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-armada8k.c b/marvell/linux/drivers/pci/controller/dwc/pcie-armada8k.c
new file mode 100644
index 0000000..4959654
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+
+#include "pcie-designware.h"
+
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
+struct armada8k_pcie {
+	struct dw_pcie *pci;
+	struct clk *clk;
+	struct clk *clk_reg;
+	struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+	unsigned int phy_count;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET		0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG		(PCIE_VENDOR_REGS_OFFSET + 0x0)
+#define PCIE_APP_LTSSM_EN		BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT		4
+#define PCIE_DEVICE_TYPE_MASK		0xF
+#define PCIE_DEVICE_TYPE_RC		0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG		(PCIE_VENDOR_REGS_OFFSET + 0x8)
+#define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG	(PCIE_VENDOR_REGS_OFFSET + 0x20)
+#define PCIE_INT_A_ASSERT_MASK		BIT(9)
+#define PCIE_INT_B_ASSERT_MASK		BIT(10)
+#define PCIE_INT_C_ASSERT_MASK		BIT(11)
+#define PCIE_INT_D_ASSERT_MASK		BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG		(PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG			(PCIE_VENDOR_REGS_OFFSET + 0x60)
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE		0x3511
+#define AWCACHE_DEFAULT_VALUE		0x5311
+
+#define DOMAIN_OUTER_SHAREABLE		0x2
+#define AX_USER_DOMAIN_MASK		0x3
+#define AX_USER_DOMAIN_SHIFT		4
+
+#define to_armada8k_pcie(x)	dev_get_drvdata((x)->dev)
+
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+	int i;
+
+	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+		phy_power_off(pcie->phy[i]);
+		phy_exit(pcie->phy[i]);
+	}
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+		ret = phy_init(pcie->phy[i]);
+		if (ret)
+			return ret;
+
+		ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+				       pcie->phy_count);
+		if (ret) {
+			phy_exit(pcie->phy[i]);
+			return ret;
+		}
+
+		ret = phy_power_on(pcie->phy[i]);
+		if (ret) {
+			phy_exit(pcie->phy[i]);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *node = dev->of_node;
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+		pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+		if (IS_ERR(pcie->phy[i])) {
+			if (PTR_ERR(pcie->phy[i]) != -ENODEV)
+				return PTR_ERR(pcie->phy[i]);
+
+			pcie->phy[i] = NULL;
+			continue;
+		}
+
+		pcie->phy_count++;
+	}
+
+	/* Old bindings miss the PHY handle, so just warn if there is no PHY */
+	if (!pcie->phy_count)
+		dev_warn(dev, "No available PHY\n");
+
+	ret = armada8k_pcie_enable_phys(pcie);
+	if (ret)
+		dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+	return ret;
+}
+
+static int armada8k_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
+
+	if ((reg & mask) == mask)
+		return 1;
+
+	dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+	return 0;
+}
+
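+/*
+ * Link bring-up sequence: halt the LTSSM so the controller can be
+ * reconfigured, force Root Complex mode, program the AXI master cache
+ * and shareability attributes, unmask INTA-INTD, then restart the
+ * LTSSM and wait for the link to train.
+ */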
+static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+	u32 reg;
+
+	if (!dw_pcie_link_up(pci)) {
+		/* Disable LTSSM state machine to enable configuration */
+		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+		reg &= ~(PCIE_APP_LTSSM_EN);
+		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+	}
+
+	/* Set the device to root complex mode */
+	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
+	/* Set the PCIe master AxCache attributes */
+	dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+	dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
+
+	/* Set the PCIe master AxDomain attributes */
+	reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG);
+	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+	dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg);
+
+	reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG);
+	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+	dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg);
+
+	/* Enable INT A-D interrupts */
+	reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG);
+	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
+
+	if (!dw_pcie_link_up(pci)) {
+		/* Configuration done. Start LTSSM */
+		reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+		reg |= PCIE_APP_LTSSM_EN;
+		dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+	}
+
+	/* Wait until the link becomes active again */
+	if (dw_pcie_wait_for_link(pci))
+		dev_err(pci->dev, "Link not up after reconfiguration\n");
+}
+
+static int armada8k_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
+
+	dw_pcie_setup_rc(pp);
+	armada8k_pcie_establish_link(pcie);
+
+	return 0;
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+	struct armada8k_pcie *pcie = arg;
+	struct dw_pcie *pci = pcie->pci;
+	u32 val;
+
+	/*
+	 * Interrupts are directly handled by the device driver of the
+	 * PCI device. However, they are also latched into the PCIe
+	 * controller, so we simply discard them.
+	 */
+	val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG);
+	dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val);
+
+	return IRQ_HANDLED;
+}
+
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+	.host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
+				  struct platform_device *pdev)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &armada8k_pcie_host_ops;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq for port\n");
+		return pp->irq;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+			       IRQF_SHARED, "armada8k-pcie", pcie);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = armada8k_pcie_link_up,
+};
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+	struct dw_pcie *pci;
+	struct armada8k_pcie *pcie;
+	struct device *dev = &pdev->dev;
+	struct resource *base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	pcie->pci = pci;
+
+	pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	ret = clk_prepare_enable(pcie->clk);
+	if (ret)
+		return ret;
+
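+	/* The "reg" clock may be absent; only probe deferral is treated as fatal */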
+	pcie->clk_reg = devm_clk_get(dev, "reg");
+	if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) {
+		ret = -EPROBE_DEFER;
+		goto fail;
+	}
+	if (!IS_ERR(pcie->clk_reg)) {
+		ret = clk_prepare_enable(pcie->clk_reg);
+		if (ret)
+			goto fail_clkreg;
+	}
+
+	/* Get the dw-pcie unit configuration/control registers base. */
+	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "couldn't remap regs base %p\n", base);
+		ret = PTR_ERR(pci->dbi_base);
+		goto fail_clkreg;
+	}
+
+	ret = armada8k_pcie_setup_phys(pcie);
+	if (ret)
+		goto fail_clkreg;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = armada8k_add_pcie_port(pcie, pdev);
+	if (ret)
+		goto disable_phy;
+
+	return 0;
+
+disable_phy:
+	armada8k_pcie_disable_phys(pcie);
+fail_clkreg:
+	clk_disable_unprepare(pcie->clk_reg);
+fail:
+	clk_disable_unprepare(pcie->clk);
+
+	return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+	{ .compatible = "marvell,armada8k-pcie", },
+	{},
+};
+
+static struct platform_driver armada8k_pcie_driver = {
+	.probe		= armada8k_pcie_probe,
+	.driver = {
+		.name	= "armada8k-pcie",
+		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-artpec6.c b/marvell/linux/drivers/pci/controller/dwc/pcie-artpec6.c
new file mode 100644
index 0000000..d00252b
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x)	dev_get_drvdata((x)->dev)
+
+enum artpec_pcie_variants {
+	ARTPEC6,
+	ARTPEC7,
+};
+
+struct artpec6_pcie {
+	struct dw_pcie		*pci;
+	struct regmap		*regmap;	/* DT axis,syscon-pcie */
+	void __iomem		*phy_base;	/* DT phy */
+	enum artpec_pcie_variants variant;
+	enum dw_pcie_device_mode mode;
+};
+
+struct artpec_pcie_of_data {
+	enum artpec_pcie_variants variant;
+	enum dw_pcie_device_mode mode;
+};
+
+static const struct of_device_id artpec6_pcie_of_match[];
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET			0x700
+
+#define ACK_F_ASPM_CTRL_OFF		(PL_OFFSET + 0xc)
+#define ACK_N_FTS_MASK			GENMASK(15, 8)
+#define ACK_N_FTS(x)			(((x) << 8) & ACK_N_FTS_MASK)
+
+#define FAST_TRAINING_SEQ_MASK		GENMASK(7, 0)
+#define FAST_TRAINING_SEQ(x)		(((x) << 0) & FAST_TRAINING_SEQ_MASK)
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG				0x18
+#define  PCIECFG_DBG_OEN		BIT(24)
+#define  PCIECFG_CORE_RESET_REQ		BIT(21)
+#define  PCIECFG_LTSSM_ENABLE		BIT(20)
+#define  PCIECFG_DEVICE_TYPE_MASK	GENMASK(19, 16)
+#define  PCIECFG_CLKREQ_B		BIT(11)
+#define  PCIECFG_REFCLK_ENABLE		BIT(10)
+#define  PCIECFG_PLL_ENABLE		BIT(9)
+#define  PCIECFG_PCLK_ENABLE		BIT(8)
+#define  PCIECFG_RISRCREN		BIT(4)
+#define  PCIECFG_MODE_TX_DRV_EN		BIT(3)
+#define  PCIECFG_CISRREN		BIT(2)
+#define  PCIECFG_MACRO_ENABLE		BIT(0)
+/* ARTPEC-7 specific fields */
+#define  PCIECFG_REFCLKSEL		BIT(23)
+#define  PCIECFG_NOC_RESET		BIT(3)
+
+#define PCIESTAT			0x1c
+/* ARTPEC-7 specific fields */
+#define  PCIESTAT_EXTREFCLK		BIT(3)
+
+#define NOCCFG				0x40
+#define  NOCCFG_ENABLE_CLK_PCIE		BIT(4)
+#define  NOCCFG_POWER_PCIE_IDLEACK	BIT(3)
+#define  NOCCFG_POWER_PCIE_IDLE		BIT(2)
+#define  NOCCFG_POWER_PCIE_IDLEREQ	BIT(1)
+
+#define PHY_STATUS			0x118
+#define  PHY_COSPLLLOCK			BIT(0)
+
+#define PHY_TX_ASIC_OUT			0x4040
+#define  PHY_TX_ASIC_OUT_TX_ACK		BIT(0)
+
+#define PHY_RX_ASIC_OUT			0x405c
+#define  PHY_RX_ASIC_OUT_ACK		BIT(0)
+
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
+{
+	u32 val;
+
+	regmap_read(artpec6_pcie->regmap, offset, &val);
+	return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
+{
+	regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
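+/*
+ * Convert a CPU address into a PCI bus address by subtracting the base of
+ * the outbound window: the config space base in RC mode, the endpoint
+ * address space base in EP mode.
+ */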
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+{
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	struct pcie_port *pp = &pci->pp;
+	struct dw_pcie_ep *ep = &pci->ep;
+
+	switch (artpec6_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		return pci_addr - pp->cfg0_base;
+	case DW_PCIE_EP_TYPE:
+		return pci_addr - ep->phys_base;
+	default:
+		dev_err(pci->dev, "UNKNOWN device type\n");
+	}
+	return pci_addr;
+}
+
+static int artpec6_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |= PCIECFG_LTSSM_ENABLE;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+
+	return 0;
+}
+
+static void artpec6_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val &= ~PCIECFG_LTSSM_ENABLE;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
+	.start_link = artpec6_pcie_establish_link,
+	.stop_link = artpec6_pcie_stop_link,
+};
+
+static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	unsigned int retries;
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+		retries--;
+	} while (retries &&
+		(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+	if (!retries)
+		dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+		retries--;
+	} while (retries && !(val & PHY_COSPLLLOCK));
+	if (!retries)
+		dev_err(dev, "PHY PLL did not lock\n");
+}
+
+static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	u16 phy_status_tx, phy_status_rx;
+	unsigned int retries;
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+		retries--;
+	} while (retries &&
+		(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+	if (!retries)
+		dev_err(dev, "PCIe clock manager did not leave idle state\n");
+
+	retries = 50;
+	do {
+		usleep_range(1000, 2000);
+		phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
+		phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
+		retries--;
+	} while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
+				(phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
+	if (!retries)
+		dev_err(dev, "PHY did not enter Pn state\n");
+}
+
+static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
+{
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
+		break;
+	case ARTPEC7:
+		artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
+		break;
+	}
+}
+
+static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
+{
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |=  PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
+		PCIECFG_MODE_TX_DRV_EN |
+		PCIECFG_CISRREN |	/* Reference clock term. 100 Ohm */
+		PCIECFG_MACRO_ENABLE;
+	val |= PCIECFG_REFCLK_ENABLE;
+	val &= ~PCIECFG_DBG_OEN;
+	val &= ~PCIECFG_CLKREQ_B;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(5000, 6000);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val |= NOCCFG_ENABLE_CLK_PCIE;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+	usleep_range(20, 30);
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(6000, 7000);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	u32 val;
+	bool extrefclk;
+
+	/* Check if external reference clock is connected */
+	val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
+	extrefclk = !!(val & PCIESTAT_EXTREFCLK);
+	dev_dbg(pci->dev, "Using reference clock: %s\n",
+		extrefclk ? "external" : "internal");
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	val |=  PCIECFG_RISRCREN |	/* Receiver term. 50 Ohm */
+		PCIECFG_PCLK_ENABLE;
+	if (extrefclk)
+		val |= PCIECFG_REFCLKSEL;
+	else
+		val &= ~PCIECFG_REFCLKSEL;
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(10, 20);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val |= NOCCFG_ENABLE_CLK_PCIE;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+	usleep_range(20, 30);
+
+	val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
+	val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+	artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
+}
+
+static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
+{
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		artpec6_pcie_init_phy_a6(artpec6_pcie);
+		break;
+	case ARTPEC7:
+		artpec6_pcie_init_phy_a7(artpec6_pcie);
+		break;
+	}
+}
+
+static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	u32 val;
+
+	if (artpec6_pcie->variant != ARTPEC7)
+		return;
+
+	/*
+	 * Increase the N_FTS (Number of Fast Training Sequences)
+	 * to be transmitted when transitioning from L0s to L0.
+	 */
+	val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
+	val &= ~ACK_N_FTS_MASK;
+	val |= ACK_N_FTS(180);
+	dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
+
+	/*
+	 * Set the Number of Fast Training Sequences that the core
+	 * advertises as its N_FTS during Gen2 or Gen3 link training.
+	 */
+	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	val &= ~FAST_TRAINING_SEQ_MASK;
+	val |= FAST_TRAINING_SEQ(180);
+	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+}
+
+static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		val |= PCIECFG_CORE_RESET_REQ;
+		break;
+	case ARTPEC7:
+		val &= ~PCIECFG_NOC_RESET;
+		break;
+	}
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+}
+
+static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
+{
+	u32 val;
+
+	val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+	switch (artpec6_pcie->variant) {
+	case ARTPEC6:
+		val &= ~PCIECFG_CORE_RESET_REQ;
+		break;
+	case ARTPEC7:
+		val |= PCIECFG_NOC_RESET;
+		break;
+	}
+	artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+	usleep_range(100, 200);
+}
+
+static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+}
+
+static int artpec6_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+
+	artpec6_pcie_assert_core_reset(artpec6_pcie);
+	artpec6_pcie_init_phy(artpec6_pcie);
+	artpec6_pcie_deassert_core_reset(artpec6_pcie);
+	artpec6_pcie_wait_for_phy(artpec6_pcie);
+	artpec6_pcie_set_nfts(artpec6_pcie);
+	dw_pcie_setup_rc(pp);
+	artpec6_pcie_establish_link(pci);
+	dw_pcie_wait_for_link(pci);
+	artpec6_pcie_enable_interrupts(artpec6_pcie);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+	.host_init = artpec6_pcie_host_init,
+};
+
+static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = artpec6_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = pci->dev;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "failed to get MSI irq\n");
+			return pp->msi_irq;
+		}
+	}
+
+	pp->ops = &artpec6_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+	enum pci_barno bar;
+
+	artpec6_pcie_assert_core_reset(artpec6_pcie);
+	artpec6_pcie_init_phy(artpec6_pcie);
+	artpec6_pcie_deassert_core_reset(artpec6_pcie);
+	artpec6_pcie_wait_for_phy(artpec6_pcie);
+	artpec6_pcie_set_nfts(artpec6_pcie);
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				  enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+		return -EINVAL;
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = artpec6_pcie_ep_init,
+	.raise_irq = artpec6_pcie_raise_irq,
+};
+
+static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = artpec6_pcie->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "failed to initialize endpoint\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int artpec6_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct artpec6_pcie *artpec6_pcie;
+	struct resource *dbi_base;
+	struct resource *phy_base;
+	int ret;
+	const struct of_device_id *match;
+	const struct artpec_pcie_of_data *data;
+	enum artpec_pcie_variants variant;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(artpec6_pcie_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct artpec_pcie_of_data *)match->data;
+	variant = (enum artpec_pcie_variants)data->variant;
+	mode = (enum dw_pcie_device_mode)data->mode;
+
+	artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
+	if (!artpec6_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	artpec6_pcie->pci = pci;
+	artpec6_pcie->variant = variant;
+	artpec6_pcie->mode = mode;
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+	artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
+	if (IS_ERR(artpec6_pcie->phy_base))
+		return PTR_ERR(artpec6_pcie->phy_base);
+
+	artpec6_pcie->regmap =
+		syscon_regmap_lookup_by_phandle(dev->of_node,
+						"axis,syscon-pcie");
+	if (IS_ERR(artpec6_pcie->regmap))
+		return PTR_ERR(artpec6_pcie->regmap);
+
+	platform_set_drvdata(pdev, artpec6_pcie);
+
+	switch (artpec6_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
+			return -ENODEV;
+
+		ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	case DW_PCIE_EP_TYPE: {
+		u32 val;
+
+		if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
+			return -ENODEV;
+
+		val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
+		val &= ~PCIECFG_DEVICE_TYPE_MASK;
+		artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
+		ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+	default:
+		dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
+	}
+
+	return 0;
+}
+
+static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
+	.variant = ARTPEC6,
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
+	.variant = ARTPEC6,
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
+	.variant = ARTPEC7,
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
+	.variant = ARTPEC7,
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+	{
+		.compatible = "axis,artpec6-pcie",
+		.data = &artpec6_pcie_rc_of_data,
+	},
+	{
+		.compatible = "axis,artpec6-pcie-ep",
+		.data = &artpec6_pcie_ep_of_data,
+	},
+	{
+		.compatible = "axis,artpec7-pcie",
+		.data = &artpec7_pcie_rc_of_data,
+	},
+	{
+		.compatible = "axis,artpec7-pcie-ep",
+		.data = &artpec7_pcie_ep_of_data,
+	},
+	{},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+	.probe = artpec6_pcie_probe,
+	.driver = {
+		.name	= "artpec6-pcie",
+		.of_match_table = artpec6_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(artpec6_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware-ep.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-ep.c
new file mode 100644
index 0000000..d6d9667
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
+				   int flags)
+{
+	u32 reg;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writel_dbi2(pci, reg, 0x0);
+	dw_pcie_writel_dbi(pci, reg, 0x0);
+	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
+		dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+	}
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+	__dw_pcie_ep_reset_bar(pci, bar, 0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+				   struct pci_epf_header *hdr)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+			   hdr->subclass_code | hdr->baseclass_code << 8);
+	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+			   hdr->cache_line_size);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+			   hdr->subsys_vendor_id);
+	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+			   hdr->interrupt_pin);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+				  dma_addr_t cpu_addr,
+				  enum dw_pcie_as_type as_type)
+{
+	int ret;
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
+	if (free_win >= ep->num_ib_windows) {
+		dev_err(pci->dev, "No free inbound window\n");
+		return -EINVAL;
+	}
+
+	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+				       as_type);
+	if (ret < 0) {
+		dev_err(pci->dev, "Failed to program IB window\n");
+		return ret;
+	}
+
+	ep->bar_to_atu[bar] = free_win;
+	set_bit(free_win, ep->ib_window_map);
+
+	return 0;
+}
+
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+				   u64 pci_addr, size_t size)
+{
+	u32 free_win;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
+	if (free_win >= ep->num_ob_windows) {
+		dev_err(pci->dev, "No free outbound window\n");
+		return -EINVAL;
+	}
+
+	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+				  phys_addr, pci_addr, size);
+
+	set_bit(free_win, ep->ob_window_map);
+	ep->outbound_addr[free_win] = phys_addr;
+
+	return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+				 struct pci_epf_bar *epf_bar)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar = epf_bar->barno;
+	u32 atu_index = ep->bar_to_atu[bar];
+
+	__dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+	clear_bit(atu_index, ep->ib_window_map);
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+			      struct pci_epf_bar *epf_bar)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar = epf_bar->barno;
+	size_t size = epf_bar->size;
+	int flags = epf_bar->flags;
+	enum dw_pcie_as_type as_type;
+	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+	if (!(flags & PCI_BASE_ADDRESS_SPACE))
+		as_type = DW_PCIE_AS_MEM;
+	else
+		as_type = DW_PCIE_AS_IO;
+
+	ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+	if (ret)
+		return ret;
+
+	dw_pcie_dbi_ro_wr_en(pci);
+
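+	/*
+	 * dbi2 aliases the shadow BAR mask registers: writing size - 1
+	 * there programs the BAR size, while the plain dbi write below
+	 * sets the BAR flags.
+	 */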
+	dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
+	dw_pcie_writel_dbi(pci, reg, flags);
+
+	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+		dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
+		dw_pcie_writel_dbi(pci, reg + 4, 0);
+	}
+
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+			      u32 *atu_index)
+{
+	u32 index;
+
+	for (index = 0; index < ep->num_ob_windows; index++) {
+		if (ep->outbound_addr[index] != addr)
+			continue;
+		*atu_index = index;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+				  phys_addr_t addr)
+{
+	int ret;
+	u32 atu_index;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_find_index(ep, addr, &atu_index);
+	if (ret < 0)
+		return;
+
+	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+	clear_bit(atu_index, ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
+			       phys_addr_t addr,
+			       u64 pci_addr, size_t size)
+{
+	int ret;
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+	if (ret) {
+		dev_err(pci->dev, "Failed to enable address\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSI_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSI_FLAGS_QMASK;
+	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, reg, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
+
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSIX_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val &= PCI_MSIX_FLAGS_QSIZE;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
+
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSIX_FLAGS_QSIZE;
+	val |= interrupts;
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, reg, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
+				enum pci_epc_irq_type type, u16 interrupt_num)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+	if (!ep->ops->raise_irq)
+		return -EINVAL;
+
+	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->stop_link)
+		return;
+
+	pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	if (!pci->ops->start_link)
+		return -EINVAL;
+
+	return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_features*
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+	if (!ep->ops->get_features)
+		return NULL;
+
+	return ep->ops->get_features(ep);
+}
+
+static const struct pci_epc_ops epc_ops = {
+	.write_header		= dw_pcie_ep_write_header,
+	.set_bar		= dw_pcie_ep_set_bar,
+	.clear_bar		= dw_pcie_ep_clear_bar,
+	.map_addr		= dw_pcie_ep_map_addr,
+	.unmap_addr		= dw_pcie_ep_unmap_addr,
+	.set_msi		= dw_pcie_ep_set_msi,
+	.get_msi		= dw_pcie_ep_get_msi,
+	.set_msix		= dw_pcie_ep_set_msix,
+	.get_msix		= dw_pcie_ep_get_msix,
+	.raise_irq		= dw_pcie_ep_raise_irq,
+	.start			= dw_pcie_ep_start,
+	.stop			= dw_pcie_ep_stop,
+	.get_features		= dw_pcie_ep_get_features,
+};
+
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+
+	dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+	return -EINVAL;
+}
+
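+/*
+ * Raise an MSI from the endpoint: read back the message address/data the
+ * host programmed into our MSI capability, map that PCI address through a
+ * spare outbound window, and write the message data to it locally.
+ */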
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u8 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+	unsigned int aligned_offset;
+	u16 msg_ctrl, msg_data;
+	u32 msg_addr_lower, msg_addr_upper, reg;
+	u64 msg_addr;
+	bool has_upper;
+	int ret;
+
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
+	reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+	if (has_upper) {
+		reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+		msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+		reg = ep->msi_cap + PCI_MSI_DATA_64;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
+	} else {
+		msg_addr_upper = 0;
+		reg = ep->msi_cap + PCI_MSI_DATA_32;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
+	}
+	aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+	msg_addr = ((u64)msg_addr_upper) << 32 |
+			(msg_addr_lower & ~aligned_offset);
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+				  epc->mem->page_size);
+	if (ret)
+		return ret;
+
+	writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+
+	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+	return 0;
+}
+
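+/*
+ * Raise an MSI-X vector: locate the MSI-X table via the BIR/offset in the
+ * capability, read the vector's address/data entry and honour its mask
+ * bit, then map the message address outbound and write the data.
+ */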
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+	u16 tbl_offset, bir;
+	u32 bar_addr_upper, bar_addr_lower;
+	u32 msg_addr_upper, msg_addr_lower;
+	u32 reg, msg_data, vec_ctrl;
+	u64 tbl_addr, msg_addr, reg_u64;
+	void __iomem *msix_tbl;
+	int ret;
+
+	reg = ep->msix_cap + PCI_MSIX_TABLE;
+	tbl_offset = dw_pcie_readl_dbi(pci, reg);
+	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+	tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+	bar_addr_upper = 0;
+	bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
+	reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+	if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
+		bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
+
+	tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
+	tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
+	tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+				   PCI_MSIX_ENTRY_SIZE);
+	if (!msix_tbl)
+		return -EINVAL;
+
+	msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
+	msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
+	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+	msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
+	vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	iounmap(msix_tbl);
+
+	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
+		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
+		return -EPERM;
+	}
+
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+				  epc->mem->page_size);
+	if (ret)
+		return ret;
+
+	writel(msg_data, ep->msi_mem);
+
+	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+	return 0;
+}
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+	struct pci_epc *epc = ep->epc;
+
+	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+			      epc->mem->page_size);
+
+	pci_epc_mem_exit(epc);
+}
+
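+/*
+ * Extended capabilities live above the first 256 bytes of config space;
+ * walk the linked list starting at offset 0x100.
+ */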
+static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+{
+	u32 header;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	while (pos) {
+		header = dw_pcie_readl_dbi(pci, pos);
+		if (PCI_EXT_CAP_ID(header) == cap)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (!pos)
+			break;
+	}
+
+	return 0;
+}
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	int i;
+	int ret;
+	u32 reg;
+	void *addr;
+	u8 hdr_type;
+	unsigned int nbars;
+	unsigned int offset;
+	struct pci_epc *epc;
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	if (!pci->dbi_base || !pci->dbi_base2) {
+		dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+	if (ret < 0) {
+		dev_err(dev, "Unable to read *num-ib-windows* property\n");
+		return ret;
+	}
+	if (ep->num_ib_windows > MAX_IATU_IN) {
+		dev_err(dev, "Invalid *num-ib-windows*\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+	if (ret < 0) {
+		dev_err(dev, "Unable to read *num-ob-windows* property\n");
+		return ret;
+	}
+	if (ep->num_ob_windows > MAX_IATU_OUT) {
+		dev_err(dev, "Invalid *num-ob-windows*\n");
+		return -EINVAL;
+	}
+
+	ep->ib_window_map = devm_kcalloc(dev,
+					 BITS_TO_LONGS(ep->num_ib_windows),
+					 sizeof(long),
+					 GFP_KERNEL);
+	if (!ep->ib_window_map)
+		return -ENOMEM;
+
+	ep->ob_window_map = devm_kcalloc(dev,
+					 BITS_TO_LONGS(ep->num_ob_windows),
+					 sizeof(long),
+					 GFP_KERNEL);
+	if (!ep->ob_window_map)
+		return -ENOMEM;
+
+	addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
+			    GFP_KERNEL);
+	if (!addr)
+		return -ENOMEM;
+	ep->outbound_addr = addr;
+
+	epc = devm_pci_epc_create(dev, &epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "Failed to create epc device\n");
+		return PTR_ERR(epc);
+	}
+
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+
+	if (ep->ops->ep_init)
+		ep->ops->ep_init(ep);
+
+	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+		dev_err(pci->dev, "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+			hdr_type);
+		return -EIO;
+	}
+
+	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+	if (ret < 0)
+		epc->max_functions = 1;
+
+	ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+				 ep->page_size);
+	if (ret < 0) {
+		dev_err(dev, "Failed to initialize address space\n");
+		return ret;
+	}
+
+	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
+					     epc->mem->page_size);
+	if (!ep->msi_mem) {
+		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
+		return -ENOMEM;
+	}
+	ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+	ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+
+	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+	if (offset) {
+		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+			PCI_REBAR_CTRL_NBAR_SHIFT;
+
+		dw_pcie_dbi_ro_wr_en(pci);
+		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
+			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+		dw_pcie_dbi_ro_wr_dis(pci);
+	}
+
+	dw_pcie_setup(pci);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware-host.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-host.c
new file mode 100644
index 0000000..43106d9
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+static struct pci_ops dw_pcie_ops;
+
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+			       u32 *val)
+{
+	struct dw_pcie *pci;
+
+	if (pp->ops->rd_own_conf)
+		return pp->ops->rd_own_conf(pp, where, size, val);
+
+	pci = to_dw_pcie_from_pp(pp);
+	return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+			       u32 val)
+{
+	struct dw_pcie *pci;
+
+	if (pp->ops->wr_own_conf)
+		return pp->ops->wr_own_conf(pp, where, size, val);
+
+	pci = to_dw_pcie_from_pp(pp);
+	return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+#ifdef CONFIG_PCI_MSI
+static void dw_msi_ack_irq(struct irq_data *d)
+{
+	irq_chip_ack_parent(d);
+}
+
+static void dw_msi_mask_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void dw_msi_unmask_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip dw_pcie_msi_irq_chip = {
+	.name = "PCI-MSI",
+	.irq_ack = dw_msi_ack_irq,
+	.irq_mask = dw_msi_mask_irq,
+	.irq_unmask = dw_msi_unmask_irq,
+};
+
+static struct msi_domain_info dw_pcie_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+	.chip	= &dw_pcie_msi_irq_chip,
+};
+#endif
+
+/* MSI interrupt handler: scan each controller's pending-status register and dispatch every set vector */
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+{
+	int i, pos, irq;
+	unsigned long val;
+	u32 status, num_ctrls;
+	irqreturn_t ret = IRQ_NONE;
+#ifdef CONFIG_PCIE_ASR1901
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	pm_wakeup_event(pci->dev, 2000);
+#endif
+	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+	for (i = 0; i < num_ctrls; i++) {
+		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+					(i * MSI_REG_CTRL_BLOCK_SIZE),
+				    4, &status);
+		if (!status)
+			continue;
+
+		ret = IRQ_HANDLED;
+		val = status;
+		pos = 0;
+		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
+					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
+			irq = irq_find_mapping(pp->irq_domain,
+					       (i * MAX_MSI_IRQS_PER_CTRL) +
+					       pos);
+			generic_handle_irq(irq);
+			pos++;
+		}
+	}
+
+	return ret;
+}
+
+/* Chained MSI interrupt service routine */
+static void dw_chained_msi_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct pcie_port *pp;
+
+	chained_irq_enter(chip, desc);
+
+	pp = irq_desc_get_handler_data(desc);
+	dw_handle_msi_irq(pp);
+
+	chained_irq_exit(chip, desc);
+}
+
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	u64 msi_target;
+
+	msi_target = (u64)pp->msi_data;
+
+	msg->address_lo = lower_32_bits(msi_target);
+	msg->address_hi = upper_32_bits(msi_target);
+
+	msg->data = d->hwirq;
+
+	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)d->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int dw_pci_msi_set_affinity(struct irq_data *d,
+				   const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static void dw_pci_bottom_mask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	pp->irq_mask[ctrl] |= BIT(bit);
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+			    pp->irq_mask[ctrl]);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_unmask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	pp->irq_mask[ctrl] &= ~BIT(bit);
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
+			    pp->irq_mask[ctrl]);
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void dw_pci_bottom_ack(struct irq_data *d)
+{
+	struct pcie_port *pp  = irq_data_get_irq_chip_data(d);
+	unsigned int res, bit, ctrl;
+
+	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
+}
+
+static struct irq_chip dw_pci_msi_bottom_irq_chip = {
+	.name = "DWPCI-MSI",
+	.irq_ack = dw_pci_bottom_ack,
+	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
+	.irq_set_affinity = dw_pci_msi_set_affinity,
+	.irq_mask = dw_pci_bottom_mask,
+	.irq_unmask = dw_pci_bottom_unmask,
+};
+
+static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs,
+				    void *args)
+{
+	struct pcie_port *pp = domain->host_data;
+	unsigned long flags;
+	u32 i;
+	int bit;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
+				      order_base_2(nr_irqs));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+
+	if (bit < 0)
+		return -ENOSPC;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_info(domain, virq + i, bit + i,
+				    pp->msi_irq_chip,
+				    pp, handle_edge_irq,
+				    NULL, NULL);
+
+	return 0;
+}
+
+static void dw_pcie_irq_domain_free(struct irq_domain *domain,
+				    unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&pp->lock, flags);
+
+	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
+			      order_base_2(nr_irqs));
+
+	raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
+	.alloc	= dw_pcie_irq_domain_alloc,
+	.free	= dw_pcie_irq_domain_free,
+};
+
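+/*
+ * Two stacked IRQ domains are used: a linear domain backed by the
+ * controller's MSI registers (the "bottom" chip above) and a PCI-MSI
+ * domain on top of it from which the PCI core allocates vectors.
+ */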
+int dw_pcie_allocate_domains(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+
+	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
+					       &dw_pcie_msi_domain_ops, pp);
+	if (!pp->irq_domain) {
+		dev_err(pci->dev, "Failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+#ifdef CONFIG_PCI_MSI
+	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
+						   &dw_pcie_msi_domain_info,
+						   pp->irq_domain);
+	if (!pp->msi_domain) {
+		dev_err(pci->dev, "Failed to create MSI domain\n");
+		irq_domain_remove(pp->irq_domain);
+		return -ENOMEM;
+	}
+#endif
+	return 0;
+}
+
+void dw_pcie_free_msi(struct pcie_port *pp)
+{
+	if (pp->msi_irq) {
+		irq_set_chained_handler(pp->msi_irq, NULL);
+		irq_set_handler_data(pp->msi_irq, NULL);
+	}
+
+	irq_domain_remove(pp->msi_domain);
+	irq_domain_remove(pp->irq_domain);
+
+	if (pp->msi_page)
+		__free_page(pp->msi_page);
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	u64 msi_target;
+
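+	/*
+	 * The page is never accessed by the CPU; it only reserves a bus
+	 * address for endpoints to target with their MSI writes, which
+	 * the controller decodes into the INTR0 status registers.
+	 */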
+	pp->msi_page = alloc_page(GFP_KERNEL);
+	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
+				    DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, pp->msi_data)) {
+		dev_err(dev, "Failed to map MSI data\n");
+		__free_page(pp->msi_page);
+		pp->msi_page = NULL;
+		return;
+	}
+	msi_target = (u64)pp->msi_data;
+
+	/* Program the msi_data */
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+			    lower_32_bits(msi_target));
+	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
+			    upper_32_bits(msi_target));
+}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
+
+int dw_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource_entry *win, *tmp;
+	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
+	struct resource *cfg_res;
+	u32 hdr_type;
+	int ret;
+
+	raw_spin_lock_init(&pci->pp.lock);
+
+	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+	if (cfg_res) {
+		pp->cfg0_size = resource_size(cfg_res) >> 1;
+		pp->cfg1_size = resource_size(cfg_res) >> 1;
+		pp->cfg0_base = cfg_res->start;
+		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
+	} else if (!pp->va_cfg0_base) {
+		dev_err(dev, "Missing *config* reg space\n");
+	}
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;
+
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+					&bridge->windows, &pp->io_base);
+	if (ret)
+		return ret;
+
+	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+	if (ret)
+		return ret;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			ret = devm_pci_remap_iospace(dev, win->res,
+						     pp->io_base);
+			if (ret) {
+				dev_warn(dev, "Error %d: failed to map resource %pR\n",
+					 ret, win->res);
+				resource_list_destroy_entry(win);
+			} else {
+				pp->io = win->res;
+				pp->io->name = "I/O";
+				pp->io_size = resource_size(pp->io);
+				pp->io_bus_addr = pp->io->start - win->offset;
+			}
+			break;
+		case IORESOURCE_MEM:
+			pp->mem = win->res;
+			pp->mem->name = "MEM";
+			pp->mem_size = resource_size(pp->mem);
+			pp->mem_bus_addr = pp->mem->start - win->offset;
+			break;
+		case 0:
+			pp->cfg = win->res;
+			pp->cfg0_size = resource_size(pp->cfg) >> 1;
+			pp->cfg1_size = resource_size(pp->cfg) >> 1;
+			pp->cfg0_base = pp->cfg->start;
+			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
+			break;
+		case IORESOURCE_BUS:
+			pp->busn = win->res;
+			break;
+		}
+	}
+
+	if (!pci->dbi_base) {
+		pci->dbi_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg->start,
+						resource_size(pp->cfg));
+		if (!pci->dbi_base) {
+			dev_err(dev, "Error with ioremap\n");
+			return -ENOMEM;
+		}
+	}
+
+	pp->mem_base = pp->mem->start;
+
+	if (!pp->va_cfg0_base) {
+		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+					pp->cfg0_base, pp->cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(dev, "Error with ioremap of cfg0 space\n");
+			return -ENOMEM;
+		}
+	}
+
+	if (!pp->va_cfg1_base) {
+		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+						pp->cfg1_base,
+						pp->cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(dev, "Error with ioremap\n");
+			return -ENOMEM;
+		}
+	}
+
+	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
+	if (ret)
+		pci->num_viewport = 2;
+
+	if (pci_msi_enabled()) {
+		/*
+		 * If a specific SoC driver needs to change the
+		 * default number of vectors, it needs to implement
+		 * the set_num_vectors callback.
+		 */
+		if (!pp->ops->set_num_vectors) {
+			pp->num_vectors = MSI_DEF_NUM_VECTORS;
+		} else {
+			pp->ops->set_num_vectors(pp);
+
+			if (pp->num_vectors > MAX_MSI_IRQS ||
+			    pp->num_vectors == 0) {
+				dev_err(dev,
+					"Invalid number of vectors\n");
+				return -EINVAL;
+			}
+		}
+
+		if (!pp->ops->msi_host_init) {
+			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+			ret = dw_pcie_allocate_domains(pp);
+			if (ret)
+				return ret;
+
+			if (pp->msi_irq)
+				irq_set_chained_handler_and_data(pp->msi_irq,
+							    dw_chained_msi_isr,
+							    pp);
+		} else {
+			ret = pp->ops->msi_host_init(pp);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	if (pp->ops->host_init) {
+		ret = pp->ops->host_init(pp);
+		if (ret)
+			goto err_free_msi;
+	}
+
+	ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
+	if (ret != PCIBIOS_SUCCESSFUL) {
+		dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
+			ret);
+		ret = pcibios_err_to_errno(ret);
+		goto err_free_msi;
+	}
+	if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+		dev_err(pci->dev,
+			"PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
+			hdr_type);
+		ret = -EIO;
+		goto err_free_msi;
+	}
+
+	pp->root_bus_nr = pp->busn->start;
+
+	bridge->dev.parent = dev;
+	bridge->sysdata = pp;
+	bridge->busnr = pp->root_bus_nr;
+	bridge->ops = &dw_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret)
+		goto err_free_msi;
+
+	pp->root_bus = bridge->bus;
+
+	if (pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
+
+	pci_bus_size_bridges(pp->root_bus);
+	pci_bus_assign_resources(pp->root_bus);
+
+	list_for_each_entry(child, &pp->root_bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(pp->root_bus);
+	return 0;
+
+err_free_msi:
+	if (pci_msi_enabled() && !pp->ops->msi_host_init)
+		dw_pcie_free_msi(pp);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
+
+void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+	pci_stop_root_bus(pp->root_bus);
+	pci_remove_root_bus(pp->root_bus);
+	if (pci_msi_enabled() && !pp->ops->msi_host_init)
+		dw_pcie_free_msi(pp);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
+
+static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				     u32 devfn, int where, int size, u32 *val,
+				     bool write)
+{
+	int ret, type;
+	u32 busdev, cfg_size;
+	u64 cpu_addr;
+	void __iomem *va_cfg_base;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+	if (bus->parent->number == pp->root_bus_nr) {
+		type = PCIE_ATU_TYPE_CFG0;
+		cpu_addr = pp->cfg0_base;
+		cfg_size = pp->cfg0_size;
+		va_cfg_base = pp->va_cfg0_base;
+	} else {
+		type = PCIE_ATU_TYPE_CFG1;
+		cpu_addr = pp->cfg1_base;
+		cfg_size = pp->cfg1_size;
+		va_cfg_base = pp->va_cfg1_base;
+	}
+
+	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+				  type, cpu_addr,
+				  busdev, cfg_size);
+	if (write)
+		ret = dw_pcie_write(va_cfg_base + where, size, *val);
+	else
+		ret = dw_pcie_read(va_cfg_base + where, size, val);
+
+	if (pci->num_viewport <= 2)
+		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+					  PCIE_ATU_TYPE_IO, pp->io_base,
+					  pp->io_bus_addr, pp->io_size);
+
+	return ret;
+}
+
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 *val)
+{
+	if (pp->ops->rd_other_conf)
+		return pp->ops->rd_other_conf(pp, bus, devfn, where,
+					      size, val);
+
+	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
+					 false);
+}
+
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 val)
+{
+	if (pp->ops->wr_other_conf)
+		return pp->ops->wr_other_conf(pp, bus, devfn, where,
+					      size, val);
+
+	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
+					 true);
+}
+
+static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
+				int dev)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/* If there is no link, then there is no device */
+	if (bus->number != pp->root_bus_nr) {
+		if (!dw_pcie_link_up(pci))
+			return 0;
+	}
+
+	/* Access only one slot on each root port */
+	if (bus->number == pp->root_bus_nr && dev > 0)
+		return 0;
+
+	return 1;
+}
+
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			   int size, u32 *val)
+{
+	struct pcie_port *pp = bus->sysdata;
+
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	if (bus->number == pp->root_bus_nr)
+		return dw_pcie_rd_own_conf(pp, where, size, val);
+
+	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
+}
+
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			   int where, int size, u32 val)
+{
+	struct pcie_port *pp = bus->sysdata;
+
+	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (bus->number == pp->root_bus_nr)
+		return dw_pcie_wr_own_conf(pp, where, size, val);
+
+	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
+}
+
+static struct pci_ops dw_pcie_ops = {
+	.read = dw_pcie_rd_conf,
+	.write = dw_pcie_wr_conf,
+};
+
+void dw_pcie_setup_rc(struct pcie_port *pp)
+{
+	u32 val, ctrl, num_ctrls;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/*
+	 * Enable writing to the DBI read-only registers so the RC
+	 * configuration can be updated; write permission is disabled
+	 * again towards the end of this function.
+	 */
+	dw_pcie_dbi_ro_wr_en(pci);
+
+	dw_pcie_setup(pci);
+
+	if (!pp->ops->msi_host_init) {
+		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+		/* Initialize IRQ Status array */
+		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+			pp->irq_mask[ctrl] = ~0;
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+					    4, pp->irq_mask[ctrl]);
+			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+					    4, ~0);
+		}
+	}
+
+	/* Setup RC BARs */
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
+
+	/* Setup interrupt pins */
+	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
+	val &= 0xffff00ff;
+	val |= 0x00000100;
+	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
+
+	/* Setup bus numbers */
+	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
+	val &= 0xff000000;
+	val |= 0x00ff0100;
+	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
+
+	/* Setup command register */
+	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+	val &= 0xffff0000;
+	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+
+	/*
+	 * If the platform provides ->rd_other_conf, it means the platform
+	 * uses its own address translation component rather than ATU, so
+	 * we should not program the ATU here.
+	 */
+	if (!pp->ops->rd_other_conf) {
+		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
+					  PCIE_ATU_TYPE_MEM, pp->mem_base,
+					  pp->mem_bus_addr, pp->mem_size);
+		if (pci->num_viewport > 2)
+			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
+						  PCIE_ATU_TYPE_IO, pp->io_base,
+						  pp->io_bus_addr, pp->io_size);
+	}
+
+	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+	/* Program correct class for RC */
+	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+	val |= PORT_LOGIC_SPEED_CHANGE;
+	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+	dw_pcie_dbi_ro_wr_dis(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
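+
+/*
+ * Example (illustrative; the foo_* names are hypothetical): a platform that
+ * routes config accesses through its own translation hardware can bypass the
+ * iATU entirely by supplying rd_other_conf()/wr_other_conf() in its
+ * dw_pcie_host_ops:
+ *
+ *	static int foo_pcie_rd_other_conf(struct pcie_port *pp,
+ *					  struct pci_bus *bus,
+ *					  unsigned int devfn, int where,
+ *					  int size, u32 *val)
+ *	{
+ *		void __iomem *addr = foo_pcie_map_cfg(pp, bus, devfn, where);
+ *
+ *		return dw_pcie_read(addr, size, val);
+ *	}
+ */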
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware-plat.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-plat.c
new file mode 100644
index 0000000..b58fdcb
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+struct dw_plat_pcie {
+	struct dw_pcie			*pci;
+	struct regmap			*regmap;
+	enum dw_pcie_device_mode	mode;
+};
+
+struct dw_plat_pcie_of_data {
+	enum dw_pcie_device_mode	mode;
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[];
+
+static int dw_plat_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	dw_pcie_setup_rc(pp);
+	dw_pcie_wait_for_link(pci);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static void dw_plat_set_num_vectors(struct pcie_port *pp)
+{
+	pp->num_vectors = MAX_MSI_IRQS;
+}
+
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+	.host_init = dw_plat_pcie_host_init,
+	.set_num_vectors = dw_plat_set_num_vectors,
+};
+
+static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
+{
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.start_link = dw_plat_pcie_establish_link,
+};
+
+static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	enum pci_barno bar;
+
+	for (bar = BAR_0; bar <= BAR_5; bar++)
+		dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+				     enum pci_epc_irq_type type,
+				     u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+	case PCI_EPC_IRQ_MSI:
+		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	case PCI_EPC_IRQ_MSIX:
+		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+	default:
+		dev_err(pci->dev, "UNKNOWN IRQ type\n");
+	}
+
+	return 0;
+}
+
+static const struct pci_epc_features dw_plat_pcie_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = true,
+};
+
+static const struct pci_epc_features*
+dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
+{
+	return &dw_plat_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+	.ep_init = dw_plat_pcie_ep_init,
+	.raise_irq = dw_plat_pcie_ep_raise_irq,
+	.get_features = dw_plat_pcie_get_features,
+};
+
+static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
+				 struct platform_device *pdev)
+{
+	struct dw_pcie *pci = dw_plat_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 1);
+	if (pp->irq < 0)
+		return pp->irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq(pdev, 0);
+		if (pp->msi_irq < 0)
+			return pp->msi_irq;
+	}
+
+	pp->ops = &dw_plat_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
+			       struct platform_device *pdev)
+{
+	int ret;
+	struct dw_pcie_ep *ep;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci = dw_plat_pcie->pci;
+
+	ep = &pci->ep;
+	ep->ops = &pcie_ep_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+	pci->dbi_base2 = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base2))
+		return PTR_ERR(pci->dbi_base2);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+	if (!res)
+		return -EINVAL;
+
+	ep->phys_base = res->start;
+	ep->addr_size = resource_size(res);
+
+	ret = dw_pcie_ep_init(ep);
+	if (ret) {
+		dev_err(dev, "Failed to initialize endpoint\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int dw_plat_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_plat_pcie *dw_plat_pcie;
+	struct dw_pcie *pci;
+	struct resource *res;  /* Resource from DT */
+	int ret;
+	const struct of_device_id *match;
+	const struct dw_plat_pcie_of_data *data;
+	enum dw_pcie_device_mode mode;
+
+	match = of_match_device(dw_plat_pcie_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct dw_plat_pcie_of_data *)match->data;
+	mode = (enum dw_pcie_device_mode)data->mode;
+
+	dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
+	if (!dw_plat_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	dw_plat_pcie->pci = pci;
+	dw_plat_pcie->mode = mode;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	if (!res)
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	platform_set_drvdata(pdev, dw_plat_pcie);
+
+	switch (dw_plat_pcie->mode) {
+	case DW_PCIE_RC_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST))
+			return -ENODEV;
+
+		ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	case DW_PCIE_EP_TYPE:
+		if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
+			return -ENODEV;
+
+		ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
+		if (ret < 0)
+			return ret;
+		break;
+	default:
+		dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+	}
+
+	return 0;
+}
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
+	.mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = {
+	.mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id dw_plat_pcie_of_match[] = {
+	{
+		.compatible = "snps,dw-pcie",
+		.data = &dw_plat_pcie_rc_of_data,
+	},
+	{
+		.compatible = "snps,dw-pcie-ep",
+		.data = &dw_plat_pcie_ep_of_data,
+	},
+	{},
+};
+
+static struct platform_driver dw_plat_pcie_driver = {
+	.driver = {
+		.name	= "dw-pcie",
+		.of_match_table = dw_plat_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = dw_plat_pcie_probe,
+};
+builtin_platform_driver(dw_plat_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware.c b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.c
new file mode 100644
index 0000000..820488d
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.c
@@ -0,0 +1,559 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/*
+ * These interfaces resemble the pci_find_*capability() interfaces, but these
+ * are for configuring host controllers, which are bridges *to* PCI devices but
+ * are not PCI devices themselves.
+ */
+static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+				  u8 cap)
+{
+	u8 cap_id, next_cap_ptr;
+	u16 reg;
+
+	if (!cap_ptr)
+		return 0;
+
+	reg = dw_pcie_readw_dbi(pci, cap_ptr);
+	cap_id = (reg & 0x00ff);
+
+	if (cap_id > PCI_CAP_ID_MAX)
+		return 0;
+
+	if (cap_id == cap)
+		return cap_ptr;
+
+	next_cap_ptr = (reg & 0xff00) >> 8;
+	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+{
+	u8 next_cap_ptr;
+	u16 reg;
+
+	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+	next_cap_ptr = (reg & 0x00ff);
+
+	return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
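+
+/*
+ * Example (illustrative): callers typically cache capability offsets once at
+ * init time, as the endpoint core does for the MSI and MSI-X capabilities:
+ *
+ *	ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ *	ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+ */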
+
+static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
+					    u8 cap)
+{
+	u32 header;
+	int ttl;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	/* minimum 8 bytes per capability */
+	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+	if (start)
+		pos = start;
+
+	header = dw_pcie_readl_dbi(pci, pos);
+	/*
+	 * If we have no capabilities, this is indicated by cap ID,
+	 * cap version and next pointer all being 0.
+	 */
+	if (header == 0)
+		return 0;
+
+	while (ttl-- > 0) {
+		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (pos < PCI_CFG_SPACE_SIZE)
+			break;
+
+		header = dw_pcie_readl_dbi(pci, pos);
+	}
+
+	return 0;
+}
+
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+{
+	return dw_pcie_find_next_ext_capability(pci, 0, cap);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+	if (!IS_ALIGNED((uintptr_t)addr, size)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	if (size == 4) {
+		*val = readl(addr);
+	} else if (size == 2) {
+		*val = readw(addr);
+	} else if (size == 1) {
+		*val = readb(addr);
+	} else {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
+
+int dw_pcie_write(void __iomem *addr, int size, u32 val)
+{
+	if (!IS_ALIGNED((uintptr_t)addr, size))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (size == 4)
+		writel(val, addr);
+	else if (size == 2)
+		writew(val, addr);
+	else if (size == 1)
+		writeb(val, addr);
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
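+
+/*
+ * Example (illustrative): both accessors return PCIBIOS_* codes rather than
+ * errnos, so callers can propagate the result directly:
+ *
+ *	u32 val;
+ *	int ret;
+ *
+ *	ret = dw_pcie_read(pci->dbi_base + where, size, &val);
+ *	if (ret != PCIBIOS_SUCCESSFUL)
+ *		return ret;
+ */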
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
+{
+	int ret;
+	u32 val;
+
+	if (pci->ops->read_dbi)
+		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
+
+	ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "Read DBI address failed\n");
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
+
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+	int ret;
+
+	if (pci->ops->write_dbi) {
+		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(pci->dbi_base + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "Write DBI address failed\n");
+}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
+
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
+{
+	int ret;
+	u32 val;
+
+	if (pci->ops->read_dbi2)
+		return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
+
+	ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "read DBI address failed\n");
+
+	return val;
+}
+
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+	int ret;
+
+	if (pci->ops->write_dbi2) {
+		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "write DBI address failed\n");
+}
+
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+{
+	int ret;
+	u32 val;
+
+	if (pci->ops->read_dbi)
+		return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+
+	ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+	if (ret)
+		dev_err(pci->dev, "Read ATU address failed\n");
+
+	return val;
+}
+
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+{
+	int ret;
+
+	if (pci->ops->write_dbi) {
+		pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+		return;
+	}
+
+	ret = dw_pcie_write(pci->atu_base + reg, size, val);
+	if (ret)
+		dev_err(pci->dev, "Write ATU address failed\n");
+}
+
+static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+	return dw_pcie_readl_atu(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+				     u32 val)
+{
+	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+	dw_pcie_writel_atu(pci, offset + reg, val);
+}
+
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
+					     int type, u64 cpu_addr,
+					     u64 pci_addr, u32 size)
+{
+	u32 retries, val;
+
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+				 lower_32_bits(cpu_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+				 upper_32_bits(cpu_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
+				 lower_32_bits(cpu_addr + size - 1));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+				 lower_32_bits(pci_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+				 upper_32_bits(pci_addr));
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+				 type);
+	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+				 PCIE_ATU_ENABLE);
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_ob_unroll(pci, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+}
+
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+			       u64 cpu_addr, u64 pci_addr, u32 size)
+{
+	u32 retries, val;
+
+	if (pci->ops->cpu_addr_fixup)
+		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+
+	if (pci->iatu_unroll_enabled) {
+		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
+						 pci_addr, size);
+		return;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
+			   PCIE_ATU_REGION_OUTBOUND | index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
+			   lower_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
+			   upper_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
+			   lower_32_bits(cpu_addr + size - 1));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
+			   lower_32_bits(pci_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
+			   upper_32_bits(pci_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+		if (val & PCIE_ATU_ENABLE)
+			return;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+}
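+
+/*
+ * Example (illustrative): a type-0 config window aimed at a device directly
+ * below the root port, as programmed by dw_pcie_access_other_conf() in the
+ * host core:
+ *
+ *	u32 busdev = PCIE_ATU_BUS(bus->number) |
+ *		     PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ *		     PCIE_ATU_FUNC(PCI_FUNC(devfn));
+ *
+ *	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+ *				  PCIE_ATU_TYPE_CFG0, pp->cfg0_base,
+ *				  busdev, pp->cfg0_size);
+ */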
+
+static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+	return dw_pcie_readl_atu(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+				     u32 val)
+{
+	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+	dw_pcie_writel_atu(pci, offset + reg, val);
+}
+
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
+					   int bar, u64 cpu_addr,
+					   enum dw_pcie_as_type as_type)
+{
+	int type;
+	u32 retries, val;
+
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+				 lower_32_bits(cpu_addr));
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+				 upper_32_bits(cpu_addr));
+
+	switch (as_type) {
+	case DW_PCIE_AS_MEM:
+		type = PCIE_ATU_TYPE_MEM;
+		break;
+	case DW_PCIE_AS_IO:
+		type = PCIE_ATU_TYPE_IO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+				 PCIE_ATU_ENABLE |
+				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_ib_unroll(pci, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+	return -EBUSY;
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+			     u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+	int type;
+	u32 retries, val;
+
+	if (pci->iatu_unroll_enabled)
+		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+						       cpu_addr, as_type);
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
+			   index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
+	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
+
+	switch (as_type) {
+	case DW_PCIE_AS_MEM:
+		type = PCIE_ATU_TYPE_MEM;
+		break;
+	case DW_PCIE_AS_IO:
+		type = PCIE_ATU_TYPE_IO;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
+			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+	/*
+	 * Make sure ATU enable takes effect before any subsequent config
+	 * and I/O accesses.
+	 */
+	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+		if (val & PCIE_ATU_ENABLE)
+			return 0;
+
+		mdelay(LINK_WAIT_IATU);
+	}
+	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
+
+	return -EBUSY;
+}
+
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+			 enum dw_pcie_region_type type)
+{
+	int region;
+
+	switch (type) {
+	case DW_PCIE_REGION_INBOUND:
+		region = PCIE_ATU_REGION_INBOUND;
+		break;
+	case DW_PCIE_REGION_OUTBOUND:
+		region = PCIE_ATU_REGION_OUTBOUND;
+		break;
+	default:
+		return;
+	}
+
+	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
+}
+
+int dw_pcie_wait_for_link(struct dw_pcie *pci)
+{
+	int retries;
+
+	/* Check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (dw_pcie_link_up(pci)) {
+			dev_info(pci->dev, "Link up\n");
+			return 0;
+		}
+		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+	}
+
+	dev_info(pci->dev, "Phy link never came up\n");
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
+
+int dw_pcie_link_up(struct dw_pcie *pci)
+{
+	u32 val;
+
+	if (pci->ops->link_up)
+		return pci->ops->link_up(pci);
+
+	val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+	return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+		(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
+}
+
+static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+{
+	u32 val;
+
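+	/*
+	 * On cores with the unrolled iATU, the viewport register is not
+	 * implemented and reads back as all ones.
+	 */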
+	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+	if (val == 0xffffffff)
+		return 1;
+
+	return 0;
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+	int ret;
+	u32 val;
+	u32 lanes;
+	struct device *dev = pci->dev;
+	struct device_node *np = dev->of_node;
+
+	if (pci->version >= 0x480A || (!pci->version &&
+				       dw_pcie_iatu_unroll_enabled(pci))) {
+		pci->iatu_unroll_enabled = true;
+		if (!pci->atu_base)
+			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+	}
+	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
+		"enabled" : "disabled");
+
+	ret = of_property_read_u32(np, "num-lanes", &lanes);
+	if (ret) {
+		dev_dbg(pci->dev, "num-lanes property not found\n");
+		return;
+	}
+
+	/* Set the number of lanes */
+	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+	val &= ~PORT_LINK_MODE_MASK;
+	switch (lanes) {
+	case 1:
+		val |= PORT_LINK_MODE_1_LANES;
+		break;
+	case 2:
+		val |= PORT_LINK_MODE_2_LANES;
+		break;
+	case 4:
+		val |= PORT_LINK_MODE_4_LANES;
+		break;
+	case 8:
+		val |= PORT_LINK_MODE_8_LANES;
+		break;
+	default:
+		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+		return;
+	}
+	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+	/* Set link width speed control register */
+	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+	switch (lanes) {
+	case 1:
+		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+		break;
+	case 2:
+		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+		break;
+	case 4:
+		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+		break;
+	case 8:
+		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+		break;
+	}
+	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
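+	/* Optionally enable the CDM (Configuration Dependent Module) register check */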
+	if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+		       PCIE_PL_CHK_REG_CHK_REG_START;
+		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+	}
+}
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-designware.h b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.h
new file mode 100644
index 0000000..5a18e94
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-designware.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Synopsys DesignWare PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ */
+
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES		10
+#define LINK_WAIT_USLEEP_MIN		90000
+#define LINK_WAIT_USLEEP_MAX		100000
+
+/* Parameters for the waiting for iATU enabled routine */
+#define LINK_WAIT_MAX_IATU_RETRIES	5
+#define LINK_WAIT_IATU			9
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_LINK_CONTROL		0x710
+#define PORT_LINK_MODE_MASK		GENMASK(21, 16)
+#define PORT_LINK_MODE(n)		FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES		PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES		PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES		PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES		PORT_LINK_MODE(0xf)
+
+#define PCIE_PORT_DEBUG0		0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK	0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0	0x11
+#define PCIE_PORT_DEBUG1		0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP		BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING	BIT(29)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
+#define PORT_LOGIC_SPEED_CHANGE		BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK	GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n)	FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES	PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES	PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES	PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES	PORT_LOGIC_LINK_WIDTH(0x8)
+
+#define PCIE_MSI_ADDR_LO		0x820
+#define PCIE_MSI_ADDR_HI		0x824
+#define PCIE_MSI_INTR0_ENABLE		0x828
+#define PCIE_MSI_INTR0_MASK		0x82C
+#define PCIE_MSI_INTR0_STATUS		0x830
+
+#define PCIE_ATU_VIEWPORT		0x900
+#define PCIE_ATU_REGION_INBOUND		BIT(31)
+#define PCIE_ATU_REGION_OUTBOUND	0
+#define PCIE_ATU_REGION_INDEX2		0x2
+#define PCIE_ATU_REGION_INDEX1		0x1
+#define PCIE_ATU_REGION_INDEX0		0x0
+#define PCIE_ATU_CR1			0x904
+#define PCIE_ATU_TYPE_MEM		0x0
+#define PCIE_ATU_TYPE_IO		0x2
+#define PCIE_ATU_TYPE_CFG0		0x4
+#define PCIE_ATU_TYPE_CFG1		0x5
+#define PCIE_ATU_CR2			0x908
+#define PCIE_ATU_ENABLE			BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE	BIT(30)
+#define PCIE_ATU_LOWER_BASE		0x90C
+#define PCIE_ATU_UPPER_BASE		0x910
+#define PCIE_ATU_LIMIT			0x914
+#define PCIE_ATU_LOWER_TARGET		0x918
+#define PCIE_ATU_BUS(x)			FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x)			FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x)		FIELD_PREP(GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET		0x91C
+
+#define PCIE_MISC_CONTROL_1_OFF		0x8BC
+#define PCIE_DBI_RO_WR_EN		BIT(0)
+
+#define PCIE_PL_CHK_REG_CONTROL_STATUS			0xB20
+#define PCIE_PL_CHK_REG_CHK_REG_START			BIT(0)
+#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS		BIT(1)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR	BIT(16)
+#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR		BIT(17)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE		BIT(18)
+
+#define PCIE_PL_CHK_REG_ERR_ADDR			0xB28
+
+/*
+ * iATU Unroll-specific register definitions
+ * From core version 4.80 onwards, address translation is done through
+ * these unrolled register blocks instead of the viewport registers
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1	0x00
+#define PCIE_ATU_UNR_REGION_CTRL2	0x04
+#define PCIE_ATU_UNR_LOWER_BASE		0x08
+#define PCIE_ATU_UNR_UPPER_BASE		0x0C
+#define PCIE_ATU_UNR_LIMIT		0x10
+#define PCIE_ATU_UNR_LOWER_TARGET	0x14
+#define PCIE_ATU_UNR_UPPER_TARGET	0x18
+
+/*
+ * The default address offset between dbi_base and atu_base. Root controller
+ * drivers are not required to initialize atu_base if the offset matches this
+ * default; the driver core automatically derives atu_base from dbi_base using
+ * this offset if atu_base is not set.
+ */
+#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+
+/* Register address builder */
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
+		((region) << 9)
+
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+		(((region) << 9) | BIT(8))
+
+#define MAX_MSI_IRQS			256
+#define MAX_MSI_IRQS_PER_CTRL		32
+#define MAX_MSI_CTRLS			(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
+#define MSI_REG_CTRL_BLOCK_SIZE		12
+#define MSI_DEF_NUM_VECTORS		32
+
+/* Maximum number of inbound/outbound iATUs */
+#define MAX_IATU_IN			256
+#define MAX_IATU_OUT			256
+
+struct pcie_port;
+struct dw_pcie;
+struct dw_pcie_ep;
+
+enum dw_pcie_region_type {
+	DW_PCIE_REGION_UNKNOWN,
+	DW_PCIE_REGION_INBOUND,
+	DW_PCIE_REGION_OUTBOUND,
+};
+
+enum dw_pcie_device_mode {
+	DW_PCIE_UNKNOWN_TYPE,
+	DW_PCIE_EP_TYPE,
+	DW_PCIE_LEG_EP_TYPE,
+	DW_PCIE_RC_TYPE,
+};
+
+struct dw_pcie_host_ops {
+	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+	int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+	int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 *val);
+	int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 val);
+	int (*host_init)(struct pcie_port *pp);
+	void (*scan_bus)(struct pcie_port *pp);
+	void (*set_num_vectors)(struct pcie_port *pp);
+	int (*msi_host_init)(struct pcie_port *pp);
+};
+
+struct pcie_port {
+	u8			root_bus_nr;
+	u64			cfg0_base;
+	void __iomem		*va_cfg0_base;
+	u32			cfg0_size;
+	u64			cfg1_base;
+	void __iomem		*va_cfg1_base;
+	u32			cfg1_size;
+	resource_size_t		io_base;
+	phys_addr_t		io_bus_addr;
+	u32			io_size;
+	u64			mem_base;
+	phys_addr_t		mem_bus_addr;
+	u32			mem_size;
+	struct resource		*cfg;
+	struct resource		*io;
+	struct resource		*mem;
+	struct resource		*busn;
+	int			irq;
+	const struct dw_pcie_host_ops *ops;
+	int			msi_irq;
+	struct irq_domain	*irq_domain;
+	struct irq_domain	*msi_domain;
+	dma_addr_t		msi_data;
+	struct page		*msi_page;
+	struct irq_chip		*msi_irq_chip;
+	u32			num_vectors;
+	u32			irq_mask[MAX_MSI_CTRLS];
+	struct pci_bus		*root_bus;
+	raw_spinlock_t		lock;
+	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+};
+
+enum dw_pcie_as_type {
+	DW_PCIE_AS_UNKNOWN,
+	DW_PCIE_AS_MEM,
+	DW_PCIE_AS_IO,
+};
+
+struct dw_pcie_ep_ops {
+	void	(*ep_init)(struct dw_pcie_ep *ep);
+	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
+			     enum pci_epc_irq_type type, u16 interrupt_num);
+	const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+};
+
+struct dw_pcie_ep {
+	struct pci_epc		*epc;
+	const struct dw_pcie_ep_ops *ops;
+	phys_addr_t		phys_base;
+	size_t			addr_size;
+	size_t			page_size;
+	u8			bar_to_atu[6];
+	phys_addr_t		*outbound_addr;
+	unsigned long		*ib_window_map;
+	unsigned long		*ob_window_map;
+	u32			num_ib_windows;
+	u32			num_ob_windows;
+	void __iomem		*msi_mem;
+	phys_addr_t		msi_mem_phys;
+	u8			msi_cap;	/* MSI capability offset */
+	u8			msix_cap;	/* MSI-X capability offset */
+};
+
+struct dw_pcie_ops {
+	u64	(*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+	u32	(*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			    size_t size);
+	void	(*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			     size_t size, u32 val);
+	u32     (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			     size_t size);
+	void    (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+			      size_t size, u32 val);
+	int	(*link_up)(struct dw_pcie *pcie);
+	int	(*start_link)(struct dw_pcie *pcie);
+	void	(*stop_link)(struct dw_pcie *pcie);
+};
+
+struct dw_pcie {
+	struct device		*dev;
+	void __iomem		*dbi_base;
+	void __iomem		*dbi_base2;
+	/* Used when iatu_unroll_enabled is true */
+	void __iomem		*atu_base;
+	u32			num_viewport;
+	u8			iatu_unroll_enabled;
+	struct pcie_port	pp;
+	struct dw_pcie_ep	ep;
+	const struct dw_pcie_ops *ops;
+	unsigned int		version;
+};
+
+#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
+
+#define to_dw_pcie_from_ep(endpoint)   \
+		container_of((endpoint), struct dw_pcie, ep)
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+
+int dw_pcie_read(void __iomem *addr, int size, u32 *val);
+int dw_pcie_write(void __iomem *addr, int size, u32 val);
+
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+int dw_pcie_link_up(struct dw_pcie *pci);
+int dw_pcie_wait_for_link(struct dw_pcie *pci);
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
+			       int type, u64 cpu_addr, u64 pci_addr,
+			       u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+			     u64 cpu_addr, enum dw_pcie_as_type as_type);
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+			 enum dw_pcie_region_type type);
+void dw_pcie_setup(struct dw_pcie *pci);
+
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	dw_pcie_write_dbi(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+	dw_pcie_write_dbi(pci, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi(pci, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+	dw_pcie_write_dbi(pci, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi(pci, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	dw_pcie_write_dbi2(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_dbi2(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+{
+	dw_pcie_write_atu(pci, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+{
+	return dw_pcie_read_atu(pci, reg, 0x4);
+}
+
+static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 val;
+
+	reg = PCIE_MISC_CONTROL_1_OFF;
+	val = dw_pcie_readl_dbi(pci, reg);
+	val |= PCIE_DBI_RO_WR_EN;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
+
+static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
+{
+	u32 reg;
+	u32 val;
+
+	reg = PCIE_MISC_CONTROL_1_OFF;
+	val = dw_pcie_readl_dbi(pci, reg);
+	val &= ~PCIE_DBI_RO_WR_EN;
+	dw_pcie_writel_dbi(pci, reg, val);
+}
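+
+/*
+ * Example (illustrative): writes to read-only config registers must be
+ * bracketed by the two helpers above, as dw_pcie_setup_rc() does when
+ * programming the RC BARs:
+ *
+ *	dw_pcie_dbi_ro_wr_en(pci);
+ *	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
+ *	dw_pcie_dbi_ro_wr_dis(pci);
+ */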
+
+#ifdef CONFIG_PCIE_DW_HOST
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
+void dw_pcie_free_msi(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+void dw_pcie_host_deinit(struct pcie_port *pp);
+int dw_pcie_allocate_domains(struct pcie_port *pp);
+#else
+static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+{
+	return IRQ_NONE;
+}
+
+static inline void dw_pcie_msi_init(struct pcie_port *pp)
+{
+}
+
+static inline void dw_pcie_free_msi(struct pcie_port *pp)
+{
+}
+
+static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+{
+}
+
+static inline int dw_pcie_host_init(struct pcie_port *pp)
+{
+	return 0;
+}
+
+static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+{
+}
+
+static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			     u16 interrupt_num);
+void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+	return 0;
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	return 0;
+}
+
+static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
+					   u8 interrupt_num)
+{
+	return 0;
+}
+
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+					   u16 interrupt_num)
+{
+	return 0;
+}
+
+static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+}
+#endif
+#endif /* _PCIE_DESIGNWARE_H */
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-hisi.c b/marvell/linux/drivers/pci/controller/dwc/pcie-hisi.c
new file mode 100644
index 0000000..6d9e1b2
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-hisi.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon SoCs
+ *
+ * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Zhou Wang <wangzhou1@hisilicon.com>
+ *          Dacai Zhu <zhudacai@hisilicon.com>
+ *          Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/regmap.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			     int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int dev = PCI_SLOT(devfn);
+
+	if (bus->number == cfg->busr.start) {
+		/* access only one slot on each root port */
+		if (dev > 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		else
+			return pci_generic_config_read32(bus, devfn, where,
+							 size, val);
+	}
+
+	return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			     int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int dev = PCI_SLOT(devfn);
+
+	if (bus->number == cfg->busr.start) {
+		/* access only one slot on each root port */
+		if (dev > 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		else
+			return pci_generic_config_write32(bus, devfn, where,
+							  size, val);
+	}
+
+	return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				       int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	void __iomem *reg_base = cfg->priv;
+
+	if (bus->number == cfg->busr.start)
+		return reg_base + where;
+	else
+		return pci_ecam_map_bus(bus, devfn, where);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct resource *res;
+	void __iomem *reg_base;
+	int ret;
+
+	/*
+	 * Retrieve RC base and size from a HISI0081 device with _UID
+	 * matching our segment.
+	 */
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+	if (ret) {
+		dev_err(dev, "can't get rc base address\n");
+		return -ENOMEM;
+	}
+
+	reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+	if (!reg_base)
+		return -ENOMEM;
+
+	cfg->priv = reg_base;
+	return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_ops = {
+	.bus_shift    = 20,
+	.init         = hisi_pcie_init,
+	.pci_ops      = {
+		.map_bus    = hisi_pcie_map_bus,
+		.read       = hisi_pcie_rd_conf,
+		.write      = hisi_pcie_wr_conf,
+	}
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
+
+#include "pcie-designware.h"
+
+#define PCIE_SUBCTRL_SYS_STATE4_REG		0x6818
+#define PCIE_HIP06_CTRL_OFF			0x1000
+#define PCIE_SYS_STATE4				(PCIE_HIP06_CTRL_OFF + 0x31c)
+#define PCIE_LTSSM_LINKUP_STATE			0x11
+#define PCIE_LTSSM_STATE_MASK			0x3F
+
+#define to_hisi_pcie(x)	dev_get_drvdata((x)->dev)
+
+struct hisi_pcie;
+
+struct pcie_soc_ops {
+	int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
+};
+
+struct hisi_pcie {
+	struct dw_pcie *pci;
+	struct regmap *subctrl;
+	u32 port_id;
+	const struct pcie_soc_ops *soc_ops;
+};
+
+/* HipXX PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
+			      u32 *val)
+{
+	u32 reg;
+	u32 reg_val;
+	void *walker = &reg_val;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
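+	/* Point walker at the addressed byte(s) within the 32-bit register */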
+	walker += (where & 0x3);
+	reg = where & ~0x3;
+	reg_val = dw_pcie_readl_dbi(pci, reg);
+
+	if (size == 1)
+		*val = *(u8 __force *) walker;
+	else if (size == 2)
+		*val = *(u16 __force *) walker;
+	else if (size == 4)
+		*val = reg_val;
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* HipXX PCIe host only supports 32-bit config access */
+static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int  size,
+				u32 val)
+{
+	u32 reg_val;
+	u32 reg;
+	void *walker = &reg_val;
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	walker += (where & 0x3);
+	reg = where & ~0x3;
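+	/* Sub-dword writes are merged into the aligned 32-bit register */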
+	if (size == 4)
+		dw_pcie_writel_dbi(pci, reg, val);
+	else if (size == 2) {
+		reg_val = dw_pcie_readl_dbi(pci, reg);
+		*(u16 __force *) walker = val;
+		dw_pcie_writel_dbi(pci, reg, reg_val);
+	} else if (size == 1) {
+		reg_val = dw_pcie_readl_dbi(pci, reg);
+		*(u8 __force *) walker = val;
+		dw_pcie_writel_dbi(pci, reg, reg_val);
+	} else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
+{
+	u32 val;
+
+	regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
+		    0x100 * hisi_pcie->port_id, &val);
+
+	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
+}
+
+static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
+{
+	struct dw_pcie *pci = hisi_pcie->pci;
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4);
+
+	return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
+}
+
+static int hisi_pcie_link_up(struct dw_pcie *pci)
+{
+	struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci);
+
+	return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
+}
+
+static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
+	.rd_own_conf = hisi_pcie_cfg_read,
+	.wr_own_conf = hisi_pcie_cfg_write,
+};
+
+static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
+			      struct platform_device *pdev)
+{
+	struct dw_pcie *pci = hisi_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+	u32 port_id;
+
+	if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
+		dev_err(dev, "failed to read port-id\n");
+		return -EINVAL;
+	}
+	if (port_id > 3) {
+		dev_err(dev, "Invalid port-id: %d\n", port_id);
+		return -EINVAL;
+	}
+	hisi_pcie->port_id = port_id;
+
+	pp->ops = &hisi_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = hisi_pcie_link_up,
+};
+
+static int hisi_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct hisi_pcie *hisi_pcie;
+	struct resource *reg;
+	int ret;
+
+	hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
+	if (!hisi_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	hisi_pcie->pci = pci;
+
+	hisi_pcie->soc_ops = of_device_get_match_data(dev);
+
+	hisi_pcie->subctrl =
+	    syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
+	if (IS_ERR(hisi_pcie->subctrl)) {
+		dev_err(dev, "cannot get subctrl base\n");
+		return PTR_ERR(hisi_pcie->subctrl);
+	}
+
+	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+	platform_set_drvdata(pdev, hisi_pcie);
+
+	ret = hisi_add_pcie_port(hisi_pcie, pdev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct pcie_soc_ops hip05_ops = {
+	.hisi_pcie_link_up = hisi_pcie_link_up_hip05,
+};
+
+static struct pcie_soc_ops hip06_ops = {
+	.hisi_pcie_link_up = hisi_pcie_link_up_hip06,
+};
+
+static const struct of_device_id hisi_pcie_of_match[] = {
+	{
+		.compatible = "hisilicon,hip05-pcie",
+		.data	    = (void *) &hip05_ops,
+	},
+	{
+		.compatible = "hisilicon,hip06-pcie",
+		.data	    = (void *) &hip06_ops,
+	},
+	{},
+};
+
+static struct platform_driver hisi_pcie_driver = {
+	.probe  = hisi_pcie_probe,
+	.driver = {
+		   .name = "hisi-pcie",
+		   .of_match_table = hisi_pcie_of_match,
+		   .suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(hisi_pcie_driver);
+
+static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pci_ecam_ops *ops;
+
+	ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
+	return pci_host_common_probe(pdev, ops);
+}
+
+static int hisi_pcie_platform_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res;
+	void __iomem *reg_base;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		dev_err(dev, "missing \"reg[1]\" property\n");
+		return -EINVAL;
+	}
+
+	reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+	if (!reg_base)
+		return -ENOMEM;
+
+	cfg->priv = reg_base;
+	return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_platform_ops = {
+	.bus_shift    = 20,
+	.init         = hisi_pcie_platform_init,
+	.pci_ops      = {
+		.map_bus    = hisi_pcie_map_bus,
+		.read       = hisi_pcie_rd_conf,
+		.write      = hisi_pcie_wr_conf,
+	}
+};
+
+static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
+	{
+		.compatible =  "hisilicon,hip06-pcie-ecam",
+		.data	    = (void *) &hisi_pcie_platform_ops,
+	},
+	{
+		.compatible =  "hisilicon,hip07-pcie-ecam",
+		.data       = (void *) &hisi_pcie_platform_ops,
+	},
+	{},
+};
+
+static struct platform_driver hisi_pcie_almost_ecam_driver = {
+	.probe  = hisi_pcie_almost_ecam_probe,
+	.driver = {
+		   .name = "hisi-pcie-almost-ecam",
+		   .of_match_table = hisi_pcie_almost_ecam_of_match,
+		   .suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(hisi_pcie_almost_ecam_driver);
+
+#endif
+#endif
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-histb.c b/marvell/linux/drivers/pci/controller/dwc/pcie-histb.c
new file mode 100644
index 0000000..811b5c6
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-histb.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ *          Jianguo Sun <sunjianguo1@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x)	dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0			0x0000
+#define PCIE_SYS_CTRL1			0x0004
+#define PCIE_SYS_CTRL7			0x001C
+#define PCIE_SYS_CTRL13			0x0034
+#define PCIE_SYS_CTRL15			0x003C
+#define PCIE_SYS_CTRL16			0x0040
+#define PCIE_SYS_CTRL17			0x0044
+
+#define PCIE_SYS_STAT0			0x0100
+#define PCIE_SYS_STAT4			0x0110
+
+#define PCIE_RDLH_LINK_UP		BIT(5)
+#define PCIE_XMLH_LINK_UP		BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE	BIT(21)
+#define PCIE_APP_LTSSM_ENABLE		BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK		GENMASK(31, 28)
+#define PCIE_WM_EP			0
+#define PCIE_WM_LEGACY			BIT(1)
+#define PCIE_WM_RC			BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK		GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE		0x11
+
+struct histb_pcie {
+	struct dw_pcie *pci;
+	struct clk *aux_clk;
+	struct clk *pipe_clk;
+	struct clk *sys_clk;
+	struct clk *bus_clk;
+	struct phy *phy;
+	struct reset_control *soft_reset;
+	struct reset_control *sys_reset;
+	struct reset_control *bus_reset;
+	void __iomem *ctrl;
+	int reset_gpio;
+	struct regulator *vpcie;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+	return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+	writel(val, histb_pcie->ctrl + reg);
+}
+
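+/*
+ * The DesignWare DBI space shares a slave port with ELBI on this SoC;
+ * PCIE_SYS_CTRL0/1 select which of the two a read or write access reaches,
+ * so every DBI access below toggles PCIE_ELBI_SLV_DBI_ENABLE around it.
+ */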
+static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 val;
+
+	val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+	if (enable)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 val;
+
+	val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+	if (enable)
+		val |= PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+			       u32 reg, size_t size)
+{
+	u32 val;
+
+	histb_pcie_dbi_r_mode(&pci->pp, true);
+	dw_pcie_read(base + reg, size, &val);
+	histb_pcie_dbi_r_mode(&pci->pp, false);
+
+	return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				 u32 reg, size_t size, u32 val)
+{
+	histb_pcie_dbi_w_mode(&pci->pp, true);
+	dw_pcie_write(base + reg, size, val);
+	histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
+				  int size, u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
+
+	histb_pcie_dbi_r_mode(pp, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	histb_pcie_dbi_r_mode(pp, false);
+
+	return ret;
+}
+
+static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
+				  int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	int ret;
+
+	histb_pcie_dbi_w_mode(pp, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	histb_pcie_dbi_w_mode(pp, false);
+
+	return ret;
+}
+
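+/*
+ * Link up requires the PHY (XMLH) and data link layer (RDLH) to report up
+ * and the LTSSM to have reached L0 (state 0x11).
+ */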
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 regval;
+	u32 status;
+
+	regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+	status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+	status &= PCIE_LTSSM_STATE_MASK;
+	if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+	    (status == PCIE_LTSSM_STATE_ACTIVE))
+		return 1;
+
+	return 0;
+}
+
+static int histb_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	u32 regval;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_info(pci->dev, "Link already up\n");
+		return 0;
+	}
+
+	/* PCIe RC work mode */
+	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+	regval &= ~PCIE_DEVICE_TYPE_MASK;
+	regval |= PCIE_WM_RC;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+	/* setup root complex */
+	dw_pcie_setup_rc(pp);
+
+	/* assert LTSSM enable */
+	regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+	regval |= PCIE_APP_LTSSM_ENABLE;
+	histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static int histb_pcie_host_init(struct pcie_port *pp)
+{
+	histb_pcie_establish_link(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops histb_pcie_host_ops = {
+	.rd_own_conf = histb_pcie_rd_own_conf,
+	.wr_own_conf = histb_pcie_wr_own_conf,
+	.host_init = histb_pcie_host_init,
+};
+
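+/*
+ * Mirror of histb_pcie_host_enable(): assert the resets, gate the clocks,
+ * pull the reset GPIO low and disable the optional vpcie regulator.
+ */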
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+	reset_control_assert(hipcie->soft_reset);
+	reset_control_assert(hipcie->sys_reset);
+	reset_control_assert(hipcie->bus_reset);
+
+	clk_disable_unprepare(hipcie->aux_clk);
+	clk_disable_unprepare(hipcie->pipe_clk);
+	clk_disable_unprepare(hipcie->sys_clk);
+	clk_disable_unprepare(hipcie->bus_clk);
+
+	if (gpio_is_valid(hipcie->reset_gpio))
+		gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+
+	if (hipcie->vpcie)
+		regulator_disable(hipcie->vpcie);
+}
+
+static int histb_pcie_host_enable(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct histb_pcie *hipcie = to_histb_pcie(pci);
+	struct device *dev = pci->dev;
+	int ret;
+
+	/* power on the PCIe device, if a regulator is present */
+	if (hipcie->vpcie) {
+		ret = regulator_enable(hipcie->vpcie);
+		if (ret) {
+			dev_err(dev, "failed to enable regulator: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (gpio_is_valid(hipcie->reset_gpio))
+		gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+
+	ret = clk_prepare_enable(hipcie->bus_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable bus clk\n");
+		goto err_bus_clk;
+	}
+
+	ret = clk_prepare_enable(hipcie->sys_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable sys clk\n");
+		goto err_sys_clk;
+	}
+
+	ret = clk_prepare_enable(hipcie->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clk\n");
+		goto err_pipe_clk;
+	}
+
+	ret = clk_prepare_enable(hipcie->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clk\n");
+		goto err_aux_clk;
+	}
+
+	reset_control_assert(hipcie->soft_reset);
+	reset_control_deassert(hipcie->soft_reset);
+
+	reset_control_assert(hipcie->sys_reset);
+	reset_control_deassert(hipcie->sys_reset);
+
+	reset_control_assert(hipcie->bus_reset);
+	reset_control_deassert(hipcie->bus_reset);
+
+	return 0;
+
+err_aux_clk:
+	clk_disable_unprepare(hipcie->pipe_clk);
+err_pipe_clk:
+	clk_disable_unprepare(hipcie->sys_clk);
+err_sys_clk:
+	clk_disable_unprepare(hipcie->bus_clk);
+err_bus_clk:
+	if (hipcie->vpcie)
+		regulator_disable(hipcie->vpcie);
+
+	return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.read_dbi = histb_pcie_read_dbi,
+	.write_dbi = histb_pcie_write_dbi,
+	.link_up = histb_pcie_link_up,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+	struct histb_pcie *hipcie;
+	struct dw_pcie *pci;
+	struct pcie_port *pp;
+	struct resource *res;
+	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	enum of_gpio_flags of_flags;
+	unsigned long flag = GPIOF_DIR_OUT;
+	int ret;
+
+	hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+	if (!hipcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	hipcie->pci = pci;
+	pp = &pci->pp;
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+	hipcie->ctrl = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hipcie->ctrl)) {
+		dev_err(dev, "cannot get control reg base\n");
+		return PTR_ERR(hipcie->ctrl);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
+	pci->dbi_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "cannot get rc-dbi base\n");
+		return PTR_ERR(pci->dbi_base);
+	}
+
+	hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+	if (IS_ERR(hipcie->vpcie)) {
+		if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+			return PTR_ERR(hipcie->vpcie);
+		hipcie->vpcie = NULL;
+	}
+
+	hipcie->reset_gpio = of_get_named_gpio_flags(np,
+				"reset-gpios", 0, &of_flags);
+	if (of_flags & OF_GPIO_ACTIVE_LOW)
+		flag |= GPIOF_ACTIVE_LOW;
+	if (gpio_is_valid(hipcie->reset_gpio)) {
+		ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
+				flag, "PCIe device power control");
+		if (ret) {
+			dev_err(dev, "unable to request gpio\n");
+			return ret;
+		}
+	}
+
+	hipcie->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(hipcie->aux_clk)) {
+		dev_err(dev, "Failed to get PCIe aux clk\n");
+		return PTR_ERR(hipcie->aux_clk);
+	}
+
+	hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+	if (IS_ERR(hipcie->pipe_clk)) {
+		dev_err(dev, "Failed to get PCIe pipe clk\n");
+		return PTR_ERR(hipcie->pipe_clk);
+	}
+
+	hipcie->sys_clk = devm_clk_get(dev, "sys");
+	if (IS_ERR(hipcie->sys_clk)) {
+		dev_err(dev, "Failed to get PCIe sys clk\n");
+		return PTR_ERR(hipcie->sys_clk);
+	}
+
+	hipcie->bus_clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(hipcie->bus_clk)) {
+		dev_err(dev, "Failed to get PCIe bus clk\n");
+		return PTR_ERR(hipcie->bus_clk);
+	}
+
+	hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+	if (IS_ERR(hipcie->soft_reset)) {
+		dev_err(dev, "couldn't get soft reset\n");
+		return PTR_ERR(hipcie->soft_reset);
+	}
+
+	hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+	if (IS_ERR(hipcie->sys_reset)) {
+		dev_err(dev, "couldn't get sys reset\n");
+		return PTR_ERR(hipcie->sys_reset);
+	}
+
+	hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+	if (IS_ERR(hipcie->bus_reset)) {
+		dev_err(dev, "couldn't get bus reset\n");
+		return PTR_ERR(hipcie->bus_reset);
+	}
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0) {
+			dev_err(dev, "Failed to get MSI IRQ\n");
+			return pp->msi_irq;
+		}
+	}
+
+	hipcie->phy = devm_phy_get(dev, "phy");
+	if (IS_ERR(hipcie->phy)) {
+		dev_info(dev, "no pcie-phy found\n");
+		hipcie->phy = NULL;
+		/*
+		 * Fall through: without a "phy" handle, PHY initialization
+		 * must already have been done by the bootloader.
+		 */
+	} else {
+		phy_init(hipcie->phy);
+	}
+
+	pp->ops = &histb_pcie_host_ops;
+
+	platform_set_drvdata(pdev, hipcie);
+
+	ret = histb_pcie_host_enable(pp);
+	if (ret) {
+		dev_err(dev, "failed to enable host\n");
+		return ret;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int histb_pcie_remove(struct platform_device *pdev)
+{
+	struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+	histb_pcie_host_disable(hipcie);
+
+	if (hipcie->phy)
+		phy_exit(hipcie->phy);
+
+	return 0;
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+	{ .compatible = "hisilicon,hi3798cv200-pcie", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+	.probe	= histb_pcie_probe,
+	.remove	= histb_pcie_remove,
+	.driver = {
+		.name = "histb-pcie",
+		.of_match_table = histb_pcie_of_match,
+	},
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-kestrel.c b/marvell/linux/drivers/pci/controller/dwc/pcie-kestrel.c
new file mode 100644
index 0000000..c8b367a
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-kestrel.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for ASR ASR1901 SoCs
+ *
+ * ASR1901 PCIe glue layer source code
+ *
+ * Copyright (C) 2022 ASR Technology Group Ltd.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/pm_qos.h>
+#include <linux/cputype.h>
+#include <soc/asr/regs-addr.h>
+#include "pcie-designware.h"
+
+#define DEVICE_NAME "ASR1901 PCIe Host"
+
+#define APMU_PCIE_CLK_RES_CTRL		0x3CC
+#define APMU_PCIE_CTRL_LOGIC		0x3D0
+#define APMU_PCIE2_CLK_RES_CTRL		0x3E4
+#define APMU_PCIE2_CTRL_LOGIC		0x3E8
+#define APMU_USB3PHY0_CTRL0		0x3B8
+
+#define HSIO_RC_R_CAL_STATUS		(0x15c)
+#define PCIE_CAL_DONE			(0x3<<14)
+
+#define LANE1_OFFSET			0x400
+#define	LTSSM_EN			(0x1 << 6)
+#define	APP_HOLD_PHY_RST		(0x1 << 30)
+#define	DEVICE_TYPE_RC			(0x1 << 31)	/* bit 31: 0 = EP, 1 = RC */
+
+/* PCIe Config registers */
+/* PCIe controller wrapper ASR configuration registers */
+#define	PCIE_AHB_IRQ			0x0000
+#define	IRQ_EN				0x1
+
+#define	PCIE_AHB_LINK_STS		0x0004
+#define DLL_LINK_UP			(0x1 << 12)
+#define PHY_LINK_UP			(0x1 << 1)
+#define LTSSM_L0			(0x11 << 6)
+#define LTSSM_STS			(0x3f << 6)
+
+#define PCIE_AHB_LEGACY_INT		0x0008
+#define PLL_READY			(0x1)
+
+#define	PCIE_AHB_IRQENABLE_SET_INTX	0x000c
+#define	INTA				(0x1 << 6)
+#define	INTB				(0x1 << 7)
+#define	INTC				(0x1 << 8)
+#define	INTD				(0x1 << 9)
+#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+#define INTX_MASK			GENMASK(9, 6)
+#define INTX_SHIFT			6
+
+#define	PCIE_AHB_IRQSTATUS		0x0010
+#define	PCIE_AHB_IRQENABLE_SET		0x0014
+#define	MSI_INT				(0x1 << 11)
+#define	DLL_LINK_INT			(0x1 << 20)
+
+/* PCIe PHY registers */
+#define PUPHY_CLK_CFG			0x8
+#define PUPHY_MODE_CFG			0x0c
+#define PUPHY_ERROR_STATUS		0x10
+#define PUPHY_OVERRIDE			0x18
+#define PUPHY_RC_REG			0x44
+#define PUPHY_PCIE3X2_REG		0x50
+#define PUPHY_PLL_REG1			0x58
+#define PUPHY_PLL_REG2			0x5c
+#define PUPHY_RX_REG1			0x60
+#define PUPHY_TX_REG			0x74
+#define PUPHY_TEST_REG			0x78
+#define PUPHY_TEST_INFO			0x84
+
+#define OVRD_MPU_U3			(0x1 << 17)
+#define CFG_MPU_U3			(0x1 << 16)
+#define OVRD_PU_RX_LFPS			(0x1 << 15)
+
+#define LINK_WAIT_MIN			900
+#define LINK_WAIT_MAX			1000
+/* Time for delay */
+#define REF_PERST_MIN			20000
+#define REF_PERST_MAX			25000
+#define PERST_ACCESS_MIN		10000
+#define PERST_ACCESS_MAX		12000
+
+#define to_kst_pcie(x)	dev_get_drvdata((x)->dev)
+
+struct kst_pcie {
+	struct device *dev;
+	struct dw_pcie *pci;
+	void __iomem *phy_base;
+	void __iomem *app_base;
+	void __iomem *usb3_base;
+	struct clk *clk;
+	struct phy *phy[2];
+	unsigned int phy_count;
+	unsigned int slot;
+	unsigned int lanes;
+	void __iomem *pcie_pmua_reg;
+	s32 lpm_qos;
+	int irq;
+	int gpio_reset;
+	struct pm_qos_request qos_idle;
+};
+
+extern u32 usb31_rterm_cal_value;
+
+static inline void kst_phy_writel(struct kst_pcie *pcie, u32 val, u32 reg)
+{
+	writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 kst_phy_readl(struct kst_pcie *pcie, u32 reg)
+{
+	return readl(pcie->phy_base + reg);
+}
+
+static inline void kst_app_writel(struct kst_pcie *pcie, u32 val, u32 reg)
+{
+	writel(val, pcie->app_base + reg);
+}
+
+static inline u32 kst_app_readl(struct kst_pcie *pcie, u32 reg)
+{
+	return readl(pcie->app_base + reg);
+}
+
+#ifndef CONFIG_USB_DWC3
+static inline void kst_usb30phy_writel(struct kst_pcie *pcie, u32 val, u32 reg)
+{
+	writel(val, pcie->usb3_base + reg);
+}
+
+static inline u32 kst_usb30phy_readl(struct kst_pcie *pcie, u32 reg)
+{
+	return readl(pcie->usb3_base + reg);
+}
+
+static int rterm_val;
+static int cali_done;
+
+#define USB_CALI_TIMEOUT 50000
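+
+/*
+ * The PCIe PHY reuses the resistor-termination (rterm) value calibrated by
+ * the USB3 PHY. When the USB3 driver is not built in, run the calibration
+ * here once and cache the result in rterm_val for kst_pcie_init_phy().
+ */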
+static void kst_usb_cali_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val, timeout = 0;
+	if (cali_done == 1)
+		return;
+
+	cali_done = 1;
+	writel(0x1e00b, regs_addr_get_va(REGS_ADDR_APMU) + APMU_USB3PHY0_CTRL0);
+
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_PLL_REG2);
+	kst_usb30phy_writel(kst_pcie, val & ~(0x1 << 21), PUPHY_PLL_REG2);
+
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_usb30phy_writel(kst_pcie, val & 0xFFFFC0FF, PUPHY_PLL_REG1);
+
+	do {
+		val = kst_usb30phy_readl(kst_pcie, PUPHY_TEST_INFO);
+		udelay(10);
+		timeout++;
+		if (timeout > USB_CALI_TIMEOUT)
+			break;
+	} while (((val >> 24) & 0x1) == 0);
+
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_TEST_INFO);
+	val = kst_usb30phy_readl(kst_pcie, PUPHY_TEST_INFO);
+	pr_debug("usb rterm = %08x\n", (val >> 8) & 0xFF);
+	writel(0xb, regs_addr_get_va(REGS_ADDR_APMU) + APMU_USB3PHY0_CTRL0);
+
+	rterm_val = val;
+}
+
+static void kst2_usb_cali_phy(void)
+{
+	u32 value;
+	int timeout = 0;
+	void __iomem *apbs_base = regs_addr_get_va(REGS_ADDR_APBS);
+
+	value = readl(apbs_base + HSIO_RC_R_CAL_STATUS);
+	value &= ~0x1;
+	writel(value, apbs_base + HSIO_RC_R_CAL_STATUS);
+
+	do {
+		value = readl(apbs_base + HSIO_RC_R_CAL_STATUS);
+		udelay(10);
+		timeout++;
+		if (timeout > USB_CALI_TIMEOUT) {
+			pr_err("PHY Calibration fail");
+			break;
+		}
+	} while ((value&PCIE_CAL_DONE) != PCIE_CAL_DONE);
+}
+
+#endif
+
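+/*
+ * PHY bring-up: release the controller resets and select RC mode via the
+ * PMU register, program the per-lane override and termination registers
+ * (on A0+ parts the termination value comes from the USB3 PHY rterm
+ * calibration), configure the PLL, and poll PUPHY_CLK_CFG until PLL_READY.
+ */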
+static int kst_pcie_init_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val, data;
+	int count = 0;
+
+	/* release pcie reset and enable pcie axi clk */
+	__raw_writel(0x8000043f, kst_pcie->pcie_pmua_reg);
+	val = __raw_readl(kst_pcie->pcie_pmua_reg);
+	val |= DEVICE_TYPE_RC;
+	val &= ~APP_HOLD_PHY_RST;
+	__raw_writel(val, kst_pcie->pcie_pmua_reg);
+	/* enable port0 dbi aclock for port1 only case */
+	if (kst_pcie->slot == 1) {
+		val = __raw_readl(regs_addr_get_va(REGS_ADDR_APMU)
+				  + APMU_PCIE_CLK_RES_CTRL);
+		if (!(val & 0x9)) {
+			val |= 0x80000009;
+			__raw_writel(val, regs_addr_get_va(REGS_ADDR_APMU)
+				     + APMU_PCIE_CLK_RES_CTRL);
+		}
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val |= (OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+		val |= (OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+		kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_RC_REG);
+	val = (val & 0xFFFF0000) | (0x1387);
+	kst_phy_writel(kst_pcie, val, PUPHY_RC_REG);
+
+	if (cpu_is_asr1901_a0_plus()) {
+#ifndef CONFIG_USB_DWC3
+		kst_usb_cali_phy(kst_pcie);
+		val = rterm_val;
+#else
+		val = usb31_rterm_cal_value;
+#endif
+		data = kst_phy_readl(kst_pcie, PUPHY_RC_REG);
+		data = (data & 0xffffff00) | ((val >> 8) & 0xFF);
+		kst_phy_writel(kst_pcie, data, PUPHY_RC_REG);
+		pr_debug("pcie rterm = %08x\n",
+			 kst_phy_readl(kst_pcie, PUPHY_RC_REG));
+	}
+#ifndef CONFIG_USB_DWC3
+	if (cpu_is_asr1906())
+		kst2_usb_cali_phy();
+#endif
+	val = kst_phy_readl(kst_pcie, PUPHY_TEST_REG);
+	val |= (0x1<<1);
+	kst_phy_writel(kst_pcie, val, PUPHY_TEST_REG);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_TEST_REG + LANE1_OFFSET);
+		val |= (0x1<<1);
+		kst_phy_writel(kst_pcie, val, PUPHY_TEST_REG + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+		val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+		kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_phy_writel(kst_pcie, val&0xffff0fff, PUPHY_PLL_REG1);
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_phy_writel(kst_pcie, val|0xC000, PUPHY_PLL_REG1);
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG2);
+	kst_phy_writel(kst_pcie, val|(0x1<<20), PUPHY_PLL_REG2);
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG2);
+	kst_phy_writel(kst_pcie, val&(~(0x1<<21)), PUPHY_PLL_REG2);
+	kst_phy_writel(kst_pcie, 0x6505, PUPHY_PCIE3X2_REG);
+
+	val = kst_phy_readl(kst_pcie, PUPHY_PLL_REG1);
+	kst_phy_writel(kst_pcie, val&0xf0ffffff, PUPHY_PLL_REG1);
+	val = kst_phy_readl(kst_pcie, PUPHY_CLK_CFG);
+	kst_phy_writel(kst_pcie, 0xB7c, PUPHY_CLK_CFG);
+
+	if (kst_pcie->lanes == 1) {
+		pr_debug("PCIe: only one lane configured, disabling lane 1\n");
+		val = kst_phy_readl(kst_pcie, PUPHY_MODE_CFG + LANE1_OFFSET);
+		kst_phy_writel(kst_pcie, val|(0x1<<30), PUPHY_MODE_CFG + LANE1_OFFSET);
+	} else {
+		kst_phy_writel(kst_pcie, 0xB7c, PUPHY_CLK_CFG + LANE1_OFFSET);
+	}
+
+	val = kst_phy_readl(kst_pcie, PUPHY_MODE_CFG);
+	val |= (0x1<<2);
+	kst_phy_writel(kst_pcie, val, PUPHY_MODE_CFG);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_MODE_CFG + LANE1_OFFSET);
+		val |= (0x1<<2);
+		kst_phy_writel(kst_pcie, val, PUPHY_MODE_CFG + LANE1_OFFSET);
+	}
+
+	do {
+		val = kst_phy_readl(kst_pcie, PUPHY_CLK_CFG);
+		count++;
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		if (count == 100) {
+			pr_info(DEVICE_NAME ": PCIe wait PLL ready timeout\n");
+			return -EINVAL;
+		}
+	} while ((val & PLL_READY) != PLL_READY);
+
+	return 0;
+}
+
+static int kst_pcie_disable_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val;
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val |= OVRD_MPU_U3;
+	val &= ~CFG_MPU_U3;
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+	val |= OVRD_MPU_U3;
+	val &= ~CFG_MPU_U3;
+
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+
+	return 0;
+}
+
+static int kst_pcie_reenable_phy(struct kst_pcie *kst_pcie)
+{
+	u32 val;
+
+	val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE);
+	val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+	kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE);
+	if (kst_pcie->lanes == 2) {
+		val = kst_phy_readl(kst_pcie, PUPHY_OVERRIDE + LANE1_OFFSET);
+		val &= ~(OVRD_MPU_U3 | OVRD_PU_RX_LFPS);
+		kst_phy_writel(kst_pcie, val, PUPHY_OVERRIDE + LANE1_OFFSET);
+	}
+
+	return 0;
+}
+
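+/*
+ * The link counts as up only when both the data link layer (DLL) and the
+ * PHY report link up in PCIE_AHB_LINK_STS.
+ */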
+static int kst_pcie_link_up(struct dw_pcie *pci)
+{
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	u32 status = kst_app_readl(kst_pcie, PCIE_AHB_LINK_STS);
+
+	if ((status & DLL_LINK_UP) && (status & PHY_LINK_UP))
+		return 1;
+
+	return 0;
+}
+
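+/*
+ * Bring the link up: select RC mode, run the generic DesignWare RC setup,
+ * release app_hold_phy_reset and enable the LTSSM, then poll until the
+ * LTSSM reaches L0 and the link-status bits confirm the link.
+ */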
+static int kst_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	struct device *dev = kst_pcie->pci->dev;
+	unsigned int val, count = 0;
+
+	if (kst_pcie_link_up(pci))
+		return 0;
+
+	val = __raw_readl(kst_pcie->pcie_pmua_reg);
+	__raw_writel(val | DEVICE_TYPE_RC, kst_pcie->pcie_pmua_reg);
+
+	dw_pcie_setup_rc(pp);
+#if 0
+	val = readl(pci->dbi_base + 0x8a8);
+	val &= ~(0xffff << 8);
+	val |= (0x10 << 8);		/* set TX preset P4 */
+	writel(val, pci->dbi_base + 0x8a8);
+#endif
+	/* Release app_hold_phy_reset and enable ltssm */
+	val = __raw_readl(kst_pcie->pcie_pmua_reg);
+	val |= LTSSM_EN;
+	val &= ~APP_HOLD_PHY_RST;
+	__raw_writel(val, kst_pcie->pcie_pmua_reg);
+
+	udelay(10);
+	val = readl(pci->dbi_base + 0x80c);
+	val |= 1<<17;
+	writel(val, pci->dbi_base + 0x80c);
+	udelay(10);
+
+	do {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		val = kst_app_readl(kst_pcie, PCIE_AHB_LINK_STS);
+		pr_debug("%s, ltssm: 0x%x.\n", __func__, val);
+		count++;
+		if (count == 1000) {
+			pr_info(DEVICE_NAME ": PCIe enter L0 failed, ltssm: 0x%x\n", val);
+			return -EINVAL;
+		}
+	} while ((val & LTSSM_STS) != LTSSM_L0);
+
+	count = 0;
+	/* check if the link is up or not */
+	while (!kst_pcie_link_up(pci)) {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		count++;
+		if (count == 100) {
+			dev_err(dev, "Link Fail\n");
+			return -EINVAL;
+		}
+	}
+
+	val = readl(pci->dbi_base + 0x80);
+	pr_info(DEVICE_NAME " %dx link negotiated (gen %d)\n",
+		(val >> 20) & 0xf, (val >> 16) & 0xf);
+
+	return 0;
+}
+
+static int kst_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	int ret;
+
+	ret = kst_pcie_establish_link(pp);
+	if (ret) {
+		__raw_writel(0x0, kst_pcie->pcie_pmua_reg);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops kst_pcie_host_ops = {
+	.host_init = kst_pcie_host_init,
+};
+
+#ifndef CONFIG_PCI_MSI
+static irqreturn_t kst_pcie_irq_handler(int irq, void *arg)
+{
+	struct kst_pcie *kst_pcie = arg;
+	u32 status;
+
+	pm_wakeup_event(kst_pcie->dev, 2000);
+	status = kst_app_readl(kst_pcie, PCIE_AHB_LEGACY_INT);
+	kst_app_writel(kst_pcie, status, PCIE_AHB_LEGACY_INT);
+
+	return IRQ_HANDLED;
+}
+#endif
+#ifdef CONFIG_PCI_MSI
+static int kst_pcie_add_msi(struct dw_pcie *pci,
+				struct platform_device *pdev)
+{
+	int irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(&pdev->dev,
+				"failed to get MSI IRQ (%d)\n", irq);
+			return irq;
+		}
+		pci->pp.msi_irq = irq;
+	}
+
+	return 0;
+}
+#endif
+
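+/*
+ * With CONFIG_PCI_MSI the controller-level MSI interrupt is unmasked;
+ * otherwise the four legacy INTA..INTD sources are enabled instead. The
+ * global IRQ_EN gate must be set in either case.
+ */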
+static void kst_pcie_enable_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kst_pcie *kst_pcie = to_kst_pcie(pci);
+	u32 val;
+
+#ifdef CONFIG_PCI_MSI
+	dw_pcie_msi_init(pp);
+
+	val = kst_app_readl(kst_pcie, PCIE_AHB_IRQENABLE_SET);
+	val |= MSI_INT;
+	kst_app_writel(kst_pcie, val, PCIE_AHB_IRQENABLE_SET);
+#else	/* legacy interrupt */
+	val = kst_app_readl(kst_pcie, PCIE_AHB_IRQENABLE_SET_INTX);
+	val |= LEG_EP_INTERRUPTS;
+	kst_app_writel(kst_pcie, val, PCIE_AHB_IRQENABLE_SET_INTX);
+#endif
+	val = kst_app_readl(kst_pcie, PCIE_AHB_IRQ);
+	val |= IRQ_EN;
+	kst_app_writel(kst_pcie, val, PCIE_AHB_IRQ);
+
+	return;
+}
+
+static int kst_add_pcie_port(struct kst_pcie *pcie,
+				  struct platform_device *pdev)
+{
+	struct dw_pcie *pci = pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &kst_pcie_host_ops;
+#ifdef CONFIG_PCI_MSI
+	ret = kst_pcie_add_msi(pci, pdev);
+	if (ret)
+		return ret;
+#else
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq for port\n");
+		return pp->irq;
+	}
+	ret = devm_request_irq(dev, pp->irq, kst_pcie_irq_handler,
+			       IRQF_SHARED, "kst-pcie", pcie);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+#endif
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host: %d\n", ret);
+		return ret;
+	}
+	kst_pcie_enable_interrupts(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = kst_pcie_link_up,
+};
+
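+/*
+ * Memory resources and DT properties consumed below: "pcie-phy" (the app
+ * registers sit at a fixed 0x10000 offset inside it), "pcie-dbi", an
+ * optional "usb3-phy" region for the shared rterm calibration, plus the
+ * "num-lanes", "lpm-qos", "num-slot" and "reset-gpios" properties.
+ */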
+static long kst_pcie_get_resource(struct kst_pcie *kst_pcie,
+				    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *phy;
+	struct resource *dbi;
+	struct device_node *np = pdev->dev.of_node;
+	const __be32 *prop;
+	unsigned int proplen;
+
+	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-phy");
+	kst_pcie->phy_base = devm_ioremap_resource(dev, phy);
+	if (IS_ERR(kst_pcie->phy_base))
+		return PTR_ERR(kst_pcie->phy_base);
+	kst_pcie->app_base = kst_pcie->phy_base + 0x10000;
+
+	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-dbi");
+	kst_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
+	if (IS_ERR(kst_pcie->pci->dbi_base))
+		return PTR_ERR(kst_pcie->pci->dbi_base);
+#ifndef CONFIG_USB_DWC3
+	if (cali_done == 0) {	/* with two PCIe ports, map the USB3 PHY only once */
+		dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb3-phy");
+		kst_pcie->usb3_base = devm_ioremap_resource(dev, dbi);
+		if (IS_ERR(kst_pcie->usb3_base))
+			return PTR_ERR(kst_pcie->usb3_base);
+	}
+#endif
+	if (of_property_read_u32(np, "num-lanes", &(kst_pcie->lanes))) {
+		pr_err("Failed to parse the PCIE0 or PCIE1 lane number\n");
+		return -EINVAL;
+	}
+
+	prop = of_get_property(np, "lpm-qos", &proplen);
+	if (!prop) {
+		pr_err("lpm-qos config in DT for PCIe is not defined\n");
+		return -EINVAL;
+	}
+	kst_pcie->lpm_qos = be32_to_cpup(prop);
+
+	if (of_property_read_u32(np, "num-slot", &kst_pcie->slot)) {
+		pr_err("Failed to parse the PCIE0 or PCIE1 slot number\n");
+		return -EINVAL;
+	}
+	kst_pcie->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
+	if (kst_pcie->gpio_reset < 0)
+		return -ENODEV;
+
+	if (kst_pcie->slot == 0)
+		kst_pcie->pcie_pmua_reg = regs_addr_get_va(REGS_ADDR_APMU)
+					+ APMU_PCIE_CLK_RES_CTRL;
+	else if (kst_pcie->slot == 1)
+		kst_pcie->pcie_pmua_reg = regs_addr_get_va(REGS_ADDR_APMU)
+					+ APMU_PCIE2_CLK_RES_CTRL;
+
+	return 0;
+}
+
+static int kst_pcie_probe(struct platform_device *pdev)
+{
+	struct dw_pcie *pci;
+	struct kst_pcie *pcie;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+	pcie->pci = pci;
+
+	ret = kst_pcie_get_resource(pcie, pdev);
+	if (ret)
+		return ret;
+
+	pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	ret = clk_prepare_enable(pcie->clk);
+	if (ret)
+		return ret;
+
+	ret = kst_pcie_init_phy(pcie);
+	if (ret)
+		goto fail_clk;
+
+	/* perst assert Endpoint */
+	if (!gpio_request(pcie->gpio_reset, "pcie_perst")) {
+		usleep_range(REF_PERST_MIN, REF_PERST_MAX);
+		ret = gpio_direction_output(pcie->gpio_reset, 1);
+		if (ret) {
+			pr_info(DEVICE_NAME ": PCIe reset device failed\n");
+			goto disable_phy;
+		}
+		gpio_free(pcie->gpio_reset);
+		usleep_range(PERST_ACCESS_MIN, PERST_ACCESS_MAX);
+	}
+	platform_set_drvdata(pdev, pcie);
+
+	ret = kst_add_pcie_port(pcie, pdev);
+	if (ret)
+		goto disable_phy;
+
+	device_init_wakeup(&pdev->dev, 1);
+	pm_qos_add_request(&pcie->qos_idle, PM_QOS_CPUIDLE_BLOCK,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+	pcie->qos_idle.name = pdev->name;
+	pm_qos_update_request(&pcie->qos_idle, pcie->lpm_qos);
+
+	return 0;
+
+disable_phy:
+	kst_pcie_disable_phy(pcie);
+fail_clk:
+	clk_disable_unprepare(pcie->clk);
+
+	return ret;
+}
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused kst_pcie_suspend_noirq(struct device *dev)
+{
+	struct kst_pcie *pcie = dev_get_drvdata(dev);
+
+	kst_pcie_disable_phy(pcie);
+	pm_qos_update_request(&pcie->qos_idle,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+
+	return 0;
+}
+
+static int __maybe_unused kst_pcie_resume_noirq(struct device *dev)
+{
+	struct kst_pcie *pcie = dev_get_drvdata(dev);
+
+	kst_pcie_reenable_phy(pcie);
+	pm_qos_update_request(&pcie->qos_idle, pcie->lpm_qos);
+
+	return 0;
+}
+
+static const struct dev_pm_ops kst_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(kst_pcie_suspend_noirq,
+				      kst_pcie_resume_noirq)
+};
+#endif
+
+static const struct of_device_id kst_pcie_of_match[] = {
+	{ .compatible = "asr,kst-pcie", },
+	{},
+};
+
+static struct platform_driver kst_pcie_driver = {
+	.probe		= kst_pcie_probe,
+	.driver = {
+		.name	= "kst-pcie",
+		.of_match_table = of_match_ptr(kst_pcie_of_match),
+		.suppress_bind_attrs = true,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &kst_pcie_pm_ops,
+#endif
+	},
+};
+
+static int __init kst_pcie_init(void)
+{
+	return platform_driver_probe(&kst_pcie_driver, kst_pcie_probe);
+}
+device_initcall_sync(kst_pcie_init);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-kirin.c b/marvell/linux/drivers/pci/controller/dwc/pcie-kirin.c
new file mode 100644
index 0000000..c19617a
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-kirin.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ *		http://www.huawei.com
+ *
+ * Author: Xiaowei Song <songxiaowei@huawei.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
+
+#define REF_CLK_FREQ			100000000
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR		0x000
+#define SOC_PCIECTRL_CTRL1_ADDR		0x004
+#define SOC_PCIEPHY_CTRL2_ADDR		0x008
+#define SOC_PCIEPHY_CTRL3_ADDR		0x00c
+#define PCIE_ELBI_SLV_DBI_ENABLE	(0x1 << 21)
+
+/* info located in APB */
+#define PCIE_APP_LTSSM_ENABLE	0x01c
+#define PCIE_APB_PHY_CTRL0	0x0
+#define PCIE_APB_PHY_CTRL1	0x4
+#define PCIE_APB_PHY_STATUS0	0x400
+#define PCIE_LINKUP_ENABLE	(0x8020)
+#define PCIE_LTSSM_ENABLE_BIT	(0x1 << 11)
+#define PIPE_CLK_STABLE		(0x1 << 19)
+#define PHY_REF_PAD_BIT		(0x1 << 8)
+#define PHY_PWR_DOWN_BIT	(0x1 << 22)
+#define PHY_RST_ACK_BIT		(0x1 << 16)
+
+/* info located in sysctrl */
+#define SCTRL_PCIE_CMOS_OFFSET	0x60
+#define SCTRL_PCIE_CMOS_BIT	0x10
+#define SCTRL_PCIE_ISO_OFFSET	0x44
+#define SCTRL_PCIE_ISO_BIT	0x30
+#define SCTRL_PCIE_HPCLK_OFFSET	0x190
+#define SCTRL_PCIE_HPCLK_BIT	0x184000
+#define SCTRL_PCIE_OE_OFFSET	0x14a
+#define PCIE_DEBOUNCE_PARAM	0xF0F400
+#define PCIE_OE_BYPASS		(0x3 << 28)
+
+/* peri_crg ctrl */
+#define CRGCTRL_PCIE_ASSERT_OFFSET	0x88
+#define CRGCTRL_PCIE_ASSERT_BIT		0x8c000000
+
+/* Time for delay */
+#define REF_2_PERST_MIN		20000
+#define REF_2_PERST_MAX		25000
+#define PERST_2_ACCESS_MIN	10000
+#define PERST_2_ACCESS_MAX	12000
+#define LINK_WAIT_MIN		900
+#define LINK_WAIT_MAX		1000
+#define PIPE_CLK_WAIT_MIN	550
+#define PIPE_CLK_WAIT_MAX	600
+#define TIME_CMOS_MIN		100
+#define TIME_CMOS_MAX		105
+#define TIME_PHY_PD_MIN		10
+#define TIME_PHY_PD_MAX		11
+
+struct kirin_pcie {
+	struct dw_pcie	*pci;
+	void __iomem	*apb_base;
+	void __iomem	*phy_base;
+	struct regmap	*crgctrl;
+	struct regmap	*sysctrl;
+	struct clk	*apb_sys_clk;
+	struct clk	*apb_phy_clk;
+	struct clk	*phy_ref_clk;
+	struct clk	*pcie_aclk;
+	struct clk	*pcie_aux_clk;
+	int		gpio_id_reset;
+};
+
+/* Registers in PCIeCTRL */
+static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
+					 u32 val, u32 reg)
+{
+	writel(val, kirin_pcie->apb_base + reg);
+}
+
+static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+	return readl(kirin_pcie->apb_base + reg);
+}
+
+/* Registers in PCIePHY */
+static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
+					u32 val, u32 reg)
+{
+	writel(val, kirin_pcie->phy_base + reg);
+}
+
+static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+	return readl(kirin_pcie->phy_base + reg);
+}
+
+static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
+			       struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+	if (IS_ERR(kirin_pcie->phy_ref_clk))
+		return PTR_ERR(kirin_pcie->phy_ref_clk);
+
+	kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
+	if (IS_ERR(kirin_pcie->pcie_aux_clk))
+		return PTR_ERR(kirin_pcie->pcie_aux_clk);
+
+	kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+	if (IS_ERR(kirin_pcie->apb_phy_clk))
+		return PTR_ERR(kirin_pcie->apb_phy_clk);
+
+	kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+	if (IS_ERR(kirin_pcie->apb_sys_clk))
+		return PTR_ERR(kirin_pcie->apb_sys_clk);
+
+	kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
+	if (IS_ERR(kirin_pcie->pcie_aclk))
+		return PTR_ERR(kirin_pcie->pcie_aclk);
+
+	return 0;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+				    struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *apb;
+	struct resource *phy;
+	struct resource *dbi;
+
+	apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+	kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
+	if (IS_ERR(kirin_pcie->apb_base))
+		return PTR_ERR(kirin_pcie->apb_base);
+
+	phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+	kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
+	if (IS_ERR(kirin_pcie->phy_base))
+		return PTR_ERR(kirin_pcie->phy_base);
+
+	dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
+	if (IS_ERR(kirin_pcie->pci->dbi_base))
+		return PTR_ERR(kirin_pcie->pci->dbi_base);
+
+	kirin_pcie->crgctrl =
+		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+	if (IS_ERR(kirin_pcie->crgctrl))
+		return PTR_ERR(kirin_pcie->crgctrl);
+
+	kirin_pcie->sysctrl =
+		syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+	if (IS_ERR(kirin_pcie->sysctrl))
+		return PTR_ERR(kirin_pcie->sysctrl);
+
+	return 0;
+}
+
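+/*
+ * PHY power-on sequence: stop driving the reference clock from the pad,
+ * clear the PHY power-down and reset-acknowledge bits, then check
+ * PCIE_APB_PHY_STATUS0 to confirm the PIPE clock is stable.
+ */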
+static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
+{
+	struct device *dev = kirin_pcie->pci->dev;
+	u32 reg_val;
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+	reg_val &= ~PHY_REF_PAD_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
+	reg_val &= ~PHY_PWR_DOWN_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
+	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
+
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+	reg_val &= ~PHY_RST_ACK_BIT;
+	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
+	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+	if (reg_val & PIPE_CLK_STABLE) {
+		dev_err(dev, "PIPE clk is not stable\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
+{
+	u32 val;
+
+	regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+	val |= PCIE_DEBOUNCE_PARAM;
+	val &= ~PCIE_OE_BYPASS;
+	regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+}
+
+static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
+{
+	int ret = 0;
+
+	if (!enable)
+		goto close_clk;
+
+	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
+	if (ret)
+		goto apb_sys_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
+	if (ret)
+		goto apb_phy_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
+	if (ret)
+		goto aclk_fail;
+
+	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
+	if (ret)
+		goto aux_clk_fail;
+
+	return 0;
+
+close_clk:
+	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
+aux_clk_fail:
+	clk_disable_unprepare(kirin_pcie->pcie_aclk);
+aclk_fail:
+	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
+apb_phy_fail:
+	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
+apb_sys_fail:
+	clk_disable_unprepare(kirin_pcie->phy_ref_clk);
+
+	return ret;
+}
+
+static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
+{
+	int ret;
+
+	/* Power supply for Host */
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
+	usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
+	kirin_pcie_oe_enable(kirin_pcie);
+
+	ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
+	if (ret)
+		return ret;
+
+	/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
+	regmap_write(kirin_pcie->crgctrl,
+		     CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
+	regmap_write(kirin_pcie->sysctrl,
+		     SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
+
+	ret = kirin_pcie_phy_init(kirin_pcie);
+	if (ret)
+		goto close_clk;
+
+	/* perst assert Endpoint */
+	if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
+		usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+		ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
+		if (ret)
+			goto close_clk;
+		usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+		return 0;
+	}
+
+close_clk:
+	kirin_pcie_clk_ctrl(kirin_pcie, false);
+	return ret;
+}
+
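+/*
+ * As on other HiSilicon parts, DBI accesses share a path with the ELBI
+ * slave interface and must be explicitly enabled through the sideband
+ * control registers for the duration of each access.
+ */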
+static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
+					   bool on)
+{
+	u32 val;
+
+	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
+	if (on)
+		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
+					   bool on)
+{
+	u32 val;
+
+	val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
+	if (on)
+		val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+	else
+		val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+}
+
+static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+				  int where, int size, u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	int ret;
+
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+	ret = dw_pcie_read(pci->dbi_base + where, size, val);
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+				  int where, int size, u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	int ret;
+
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+	ret = dw_pcie_write(pci->dbi_base + where, size, val);
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+			       u32 reg, size_t size)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	u32 ret;
+
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+	dw_pcie_read(base + reg, size, &ret);
+	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+	return ret;
+}
+
+static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+				 u32 reg, size_t size, u32 val)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+	dw_pcie_write(base + reg, size, val);
+	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+}
+
+static int kirin_pcie_link_up(struct dw_pcie *pci)
+{
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+
+	if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+		return 1;
+
+	return 0;
+}
+
+static int kirin_pcie_establish_link(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+	struct device *dev = kirin_pcie->pci->dev;
+	int count = 0;
+
+	if (kirin_pcie_link_up(pci))
+		return 0;
+
+	dw_pcie_setup_rc(pp);
+
+	/* assert LTSSM enable */
+	kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
+			      PCIE_APP_LTSSM_ENABLE);
+
+	/* check if the link is up or not */
+	while (!kirin_pcie_link_up(pci)) {
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+		count++;
+		if (count == 1000) {
+			dev_err(dev, "Link Fail\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int kirin_pcie_host_init(struct pcie_port *pp)
+{
+	kirin_pcie_establish_link(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_ops kirin_dw_pcie_ops = {
+	.read_dbi = kirin_pcie_read_dbi,
+	.write_dbi = kirin_pcie_write_dbi,
+	.link_up = kirin_pcie_link_up,
+};
+
+static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
+	.rd_own_conf = kirin_pcie_rd_own_conf,
+	.wr_own_conf = kirin_pcie_wr_own_conf,
+	.host_init = kirin_pcie_host_init,
+};
+
+static int kirin_pcie_add_msi(struct dw_pcie *pci,
+				struct platform_device *pdev)
+{
+	int irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(&pdev->dev,
+				"failed to get MSI IRQ (%d)\n", irq);
+			return irq;
+		}
+
+		pci->pp.msi_irq = irq;
+	}
+
+	return 0;
+}
+
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+			       struct platform_device *pdev)
+{
+	int ret;
+
+	ret = kirin_pcie_add_msi(pci, pdev);
+	if (ret)
+		return ret;
+
+	pci->pp.ops = &kirin_pcie_host_ops;
+
+	return dw_pcie_host_init(&pci->pp);
+}
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct kirin_pcie *kirin_pcie;
+	struct dw_pcie *pci;
+	int ret;
+
+	if (!dev->of_node) {
+		dev_err(dev, "NULL node\n");
+		return -EINVAL;
+	}
+
+	kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
+	if (!kirin_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &kirin_dw_pcie_ops;
+	kirin_pcie->pci = pci;
+
+	ret = kirin_pcie_get_clk(kirin_pcie, pdev);
+	if (ret)
+		return ret;
+
+	ret = kirin_pcie_get_resource(kirin_pcie, pdev);
+	if (ret)
+		return ret;
+
+	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
+						      "reset-gpios", 0);
+	if (kirin_pcie->gpio_id_reset < 0)
+		return -ENODEV;
+
+	ret = kirin_pcie_power_on(kirin_pcie);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, kirin_pcie);
+
+	return kirin_add_pcie_port(pci, pdev);
+}
+
+static const struct of_device_id kirin_pcie_match[] = {
+	{ .compatible = "hisilicon,kirin960-pcie" },
+	{},
+};
+
+static struct platform_driver kirin_pcie_driver = {
+	.probe			= kirin_pcie_probe,
+	.driver			= {
+		.name			= "kirin-pcie",
+		.of_match_table = kirin_pcie_match,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(kirin_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-qcom.c b/marvell/linux/drivers/pci/controller/dwc/pcie-qcom.c
new file mode 100644
index 0000000..055cef6
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-qcom.c
@@ -0,0 +1,1551 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe root complex driver
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright 2015 Linaro Limited.
+ *
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_SYS_CTRL			0x00
+#define MST_WAKEUP_EN				BIT(13)
+#define SLV_WAKEUP_EN				BIT(12)
+#define MSTR_ACLK_CGC_DIS			BIT(10)
+#define SLV_ACLK_CGC_DIS			BIT(9)
+#define CORE_CLK_CGC_DIS			BIT(6)
+#define AUX_PWR_DET				BIT(4)
+#define L23_CLK_RMV_DIS				BIT(2)
+#define L1_CLK_RMV_DIS				BIT(1)
+
+#define PCIE20_COMMAND_STATUS			0x04
+#define CMD_BME_VAL				0x4
+#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
+#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10
+
+#define PCIE20_PARF_PHY_CTRL			0x40
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		((x) << 16)
+
+#define PCIE20_PARF_PHY_REFCLK			0x4C
+#define PHY_REFCLK_SSP_EN			BIT(16)
+#define PHY_REFCLK_USE_PAD			BIT(12)
+
+#define PCIE20_PARF_DBI_BASE_ADDR		0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
+#define PCIE20_PARF_LTSSM			0x1B0
+#define PCIE20_PARF_SID_OFFSET			0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
+#define PCIE20_PARF_DEVICE_TYPE			0x1000
+
+#define PCIE20_ELBI_SYS_CTRL			0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
+#define CFG_BRIDGE_SB_INIT			BIT(0)
+
+#define PCIE20_CAP				0x70
+#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
+#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
+#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
+#define PCIE_CAP_LINK1_VAL			0x2FD7F
+
+#define PCIE20_PARF_Q2A_FLUSH			0x1AC
+
+#define PCIE20_MISC_CONTROL_1_REG		0x8BC
+#define DBI_RO_WR_EN				1
+
+#define PERST_DELAY_US				1000
+/* PARF registers */
+#define PCIE20_PARF_PCS_DEEMPH			0x34
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		((x) << 16)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	((x) << 8)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	((x) << 0)
+
+#define PCIE20_PARF_PCS_SWING			0x38
+#define PCS_SWING_TX_SWING_FULL(x)		((x) << 8)
+#define PCS_SWING_TX_SWING_LOW(x)		((x) << 0)
+
+#define PCIE20_PARF_CONFIG_BITS		0x50
+#define PHY_RX0_EQ(x)				((x) << 24)
+
+#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
+#define SLV_ADDR_SPACE_SZ			0x10000000
+
+#define DEVICE_TYPE_RC				0x4
+
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
+struct qcom_pcie_resources_2_1_0 {
+	struct clk *iface_clk;
+	struct clk *core_clk;
+	struct clk *phy_clk;
+	struct clk *aux_clk;
+	struct clk *ref_clk;
+	struct reset_control *pci_reset;
+	struct reset_control *axi_reset;
+	struct reset_control *ahb_reset;
+	struct reset_control *por_reset;
+	struct reset_control *phy_reset;
+	struct reset_control *ext_reset;
+	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+struct qcom_pcie_resources_1_0_0 {
+	struct clk *iface;
+	struct clk *aux;
+	struct clk *master_bus;
+	struct clk *slave_bus;
+	struct reset_control *core;
+	struct regulator *vdda;
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
+struct qcom_pcie_resources_2_3_2 {
+	struct clk *aux_clk;
+	struct clk *master_clk;
+	struct clk *slave_clk;
+	struct clk *cfg_clk;
+	struct clk *pipe_clk;
+	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
+struct qcom_pcie_resources_2_4_0 {
+	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+	int num_clks;
+	struct reset_control *axi_m_reset;
+	struct reset_control *axi_s_reset;
+	struct reset_control *pipe_reset;
+	struct reset_control *axi_m_vmid_reset;
+	struct reset_control *axi_s_xpu_reset;
+	struct reset_control *parf_reset;
+	struct reset_control *phy_reset;
+	struct reset_control *axi_m_sticky_reset;
+	struct reset_control *pipe_sticky_reset;
+	struct reset_control *pwr_reset;
+	struct reset_control *ahb_reset;
+	struct reset_control *phy_ahb_reset;
+};
+
+struct qcom_pcie_resources_2_3_3 {
+	struct clk *iface;
+	struct clk *axi_m_clk;
+	struct clk *axi_s_clk;
+	struct clk *ahb_clk;
+	struct clk *aux_clk;
+	struct reset_control *rst[7];
+};
+
+struct qcom_pcie_resources_2_7_0 {
+	struct clk_bulk_data clks[6];
+	struct regulator_bulk_data supplies[2];
+	struct reset_control *pci_reset;
+	struct clk *pipe_clk;
+};
+
+union qcom_pcie_resources {
+	struct qcom_pcie_resources_1_0_0 v1_0_0;
+	struct qcom_pcie_resources_2_1_0 v2_1_0;
+	struct qcom_pcie_resources_2_3_2 v2_3_2;
+	struct qcom_pcie_resources_2_3_3 v2_3_3;
+	struct qcom_pcie_resources_2_4_0 v2_4_0;
+	struct qcom_pcie_resources_2_7_0 v2_7_0;
+};
+
+struct qcom_pcie;
+
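+/*
+ * Hooks implemented per IP version: get_resources() claims clocks, resets
+ * and supplies once at probe; init() and post_init() power the port up
+ * (post_init presumably runs after the PHY is powered); deinit() and
+ * post_deinit() undo them; ltssm_enable() starts link training.
+ */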
+struct qcom_pcie_ops {
+	int (*get_resources)(struct qcom_pcie *pcie);
+	int (*init)(struct qcom_pcie *pcie);
+	int (*post_init)(struct qcom_pcie *pcie);
+	void (*deinit)(struct qcom_pcie *pcie);
+	void (*post_deinit)(struct qcom_pcie *pcie);
+	void (*ltssm_enable)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie {
+	struct dw_pcie *pci;
+	void __iomem *parf;			/* DT parf */
+	void __iomem *elbi;			/* DT elbi */
+	union qcom_pcie_resources res;
+	struct phy *phy;
+	struct gpio_desc *reset;
+	const struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+	gpiod_set_value_cansleep(pcie->reset, 1);
+	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+	/* Ensure that PERST has been asserted for at least 100 ms */
+	msleep(100);
+	gpiod_set_value_cansleep(pcie->reset, 0);
+	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
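+/*
+ * Kick off link training via the IP-specific LTSSM-enable hook, then let
+ * the DesignWare core wait for the link to come up.
+ */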
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+	struct dw_pcie *pci = pcie->pci;
+
+	if (dw_pcie_link_up(pci))
+		return 0;
+
+	/* Enable Link Training state machine */
+	if (pcie->ops->ltssm_enable)
+		pcie->ops->ltssm_enable(pcie);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vdda_phy";
+	res->supplies[2].supply = "vdda_refclk";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->iface_clk = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface_clk))
+		return PTR_ERR(res->iface_clk);
+
+	res->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(res->core_clk))
+		return PTR_ERR(res->core_clk);
+
+	res->phy_clk = devm_clk_get(dev, "phy");
+	if (IS_ERR(res->phy_clk))
+		return PTR_ERR(res->phy_clk);
+
+	res->aux_clk = devm_clk_get_optional(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	res->ref_clk = devm_clk_get_optional(dev, "ref");
+	if (IS_ERR(res->ref_clk))
+		return PTR_ERR(res->ref_clk);
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
+	if (IS_ERR(res->axi_reset))
+		return PTR_ERR(res->axi_reset);
+
+	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
+	if (IS_ERR(res->por_reset))
+		return PTR_ERR(res->por_reset);
+
+	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
+	if (IS_ERR(res->ext_reset))
+		return PTR_ERR(res->ext_reset);
+
+	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+	return PTR_ERR_OR_ZERO(res->phy_reset);
+}
+
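+/*
+ * Power the port down: assert all resets, gate the clocks, re-assert the
+ * PARF PHY_CTRL bit and cut the supplies.
+ */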
+static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
+	clk_disable_unprepare(res->phy_clk);
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->ext_reset);
+	reset_control_assert(res->phy_reset);
+	clk_disable_unprepare(res->iface_clk);
+	clk_disable_unprepare(res->core_clk);
+	clk_disable_unprepare(res->aux_clk);
+	clk_disable_unprepare(res->ref_clk);
+
+	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	struct device_node *node = dev->of_node;
+	u32 val;
+	int ret;
+
+	/* reset the PCIe interface as U-Boot can leave it in an undefined state */
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->ext_reset);
+	reset_control_assert(res->phy_reset);
+
+	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->iface_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->core_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_core;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_clk_aux;
+	}
+
+	ret = clk_prepare_enable(res->ref_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable ref clock\n");
+		goto err_clk_ref;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_deassert_ahb;
+	}
+
+	ret = reset_control_deassert(res->ext_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ext reset\n");
+		goto err_deassert_ahb;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+		writel(PCS_SWING_TX_SWING_FULL(120) |
+			       PCS_SWING_TX_SWING_LOW(120),
+		       pcie->parf + PCIE20_PARF_PCS_SWING);
+		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+
+		/* set TX termination offset */
+		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+	}
+
+	/* enable external reference clock */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+	/* USE_PAD is required only for ipq806x */
+	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+		val &= ~PHY_REFCLK_USE_PAD;
+	val |= PHY_REFCLK_SSP_EN;
+	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->por_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert por reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->axi_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi reset\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->phy_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable phy clock\n");
+		goto err_deassert_ahb;
+	}
+
+	/* wait for clock acquisition */
+	usleep_range(1000, 1500);
+
+	/* Set the max TLP size to 2K instead of using the default of 4K */
+	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+	writel(CFG_BRIDGE_SB_INIT,
+	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
+	return 0;
+
+err_deassert_ahb:
+	clk_disable_unprepare(res->ref_clk);
+err_clk_ref:
+	clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+	clk_disable_unprepare(res->core_clk);
+err_clk_core:
+	clk_disable_unprepare(res->iface_clk);
+err_assert_ahb:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+
+	res->vdda = devm_regulator_get(dev, "vdda");
+	if (IS_ERR(res->vdda))
+		return PTR_ERR(res->vdda);
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->aux = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux))
+		return PTR_ERR(res->aux);
+
+	res->master_bus = devm_clk_get(dev, "master_bus");
+	if (IS_ERR(res->master_bus))
+		return PTR_ERR(res->master_bus);
+
+	res->slave_bus = devm_clk_get(dev, "slave_bus");
+	if (IS_ERR(res->slave_bus))
+		return PTR_ERR(res->slave_bus);
+
+	res->core = devm_reset_control_get_exclusive(dev, "core");
+	return PTR_ERR_OR_ZERO(res->core);
+}
+
+static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+
+	reset_control_assert(res->core);
+	clk_disable_unprepare(res->slave_bus);
+	clk_disable_unprepare(res->master_bus);
+	clk_disable_unprepare(res->iface);
+	clk_disable_unprepare(res->aux);
+	regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	ret = reset_control_deassert(res->core);
+	if (ret) {
+		dev_err(dev, "cannot deassert core reset\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->aux);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_res;
+	}
+
+	ret = clk_prepare_enable(res->iface);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_aux;
+	}
+
+	ret = clk_prepare_enable(res->master_bus);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master_bus clock\n");
+		goto err_iface;
+	}
+
+	ret = clk_prepare_enable(res->slave_bus);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+		goto err_master;
+	}
+
+	ret = regulator_enable(res->vdda);
+	if (ret) {
+		dev_err(dev, "cannot enable vdda regulator\n");
+		goto err_slave;
+	}
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
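+		/*
+		 * Bit 31 is taken to be the enable bit of the AXI master
+		 * write-address halt window needed for MSI delivery
+		 * (assumption from downstream usage of this register).
+		 */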
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_slave:
+	clk_disable_unprepare(res->slave_bus);
+err_master:
+	clk_disable_unprepare(res->master_bus);
+err_iface:
+	clk_disable_unprepare(res->iface);
+err_aux:
+	clk_disable_unprepare(res->aux);
+err_res:
+	reset_control_assert(res->core);
+
+	return ret;
+}
+
+static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+	val |= BIT(8);
+	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vddpe-3v3";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	res->cfg_clk = devm_clk_get(dev, "cfg");
+	if (IS_ERR(res->cfg_clk))
+		return PTR_ERR(res->cfg_clk);
+
+	res->master_clk = devm_clk_get(dev, "bus_master");
+	if (IS_ERR(res->master_clk))
+		return PTR_ERR(res->master_clk);
+
+	res->slave_clk = devm_clk_get(dev, "bus_slave");
+	if (IS_ERR(res->slave_clk))
+		return PTR_ERR(res->slave_clk);
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+	clk_disable_unprepare(res->slave_clk);
+	clk_disable_unprepare(res->master_clk);
+	clk_disable_unprepare(res->cfg_clk);
+	clk_disable_unprepare(res->aux_clk);
+
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+
+	clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_aux_clk;
+	}
+
+	ret = clk_prepare_enable(res->cfg_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable cfg clock\n");
+		goto err_cfg_clk;
+	}
+
+	ret = clk_prepare_enable(res->master_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master clock\n");
+		goto err_master_clk;
+	}
+
+	ret = clk_prepare_enable(res->slave_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave clock\n");
+		goto err_slave_clk;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
+err_slave_clk:
+	clk_disable_unprepare(res->master_clk);
+err_master_clk:
+	clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+	clk_disable_unprepare(res->aux_clk);
+
+err_aux_clk:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	ret = clk_prepare_enable(res->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
+	int ret;
+
+	res->clks[0].id = "aux";
+	res->clks[1].id = "master_bus";
+	res->clks[2].id = "slave_bus";
+	res->clks[3].id = "iface";
+
+	/* qcom,pcie-ipq4019 is defined without "iface" */
+	res->num_clks = is_ipq ? 3 : 4;
+
+	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+	if (ret < 0)
+		return ret;
+
+	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
+	if (IS_ERR(res->axi_m_reset))
+		return PTR_ERR(res->axi_m_reset);
+
+	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
+	if (IS_ERR(res->axi_s_reset))
+		return PTR_ERR(res->axi_s_reset);
+
+	if (is_ipq) {
+		/*
+		 * These resources relate to the PHY or are secure clocks, but
+		 * are controlled here for IPQ4019.
+		 */
+		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
+		if (IS_ERR(res->pipe_reset))
+			return PTR_ERR(res->pipe_reset);
+
+		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
+									 "axi_m_vmid");
+		if (IS_ERR(res->axi_m_vmid_reset))
+			return PTR_ERR(res->axi_m_vmid_reset);
+
+		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
+									"axi_s_xpu");
+		if (IS_ERR(res->axi_s_xpu_reset))
+			return PTR_ERR(res->axi_s_xpu_reset);
+
+		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
+		if (IS_ERR(res->parf_reset))
+			return PTR_ERR(res->parf_reset);
+
+		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+		if (IS_ERR(res->phy_reset))
+			return PTR_ERR(res->phy_reset);
+	}
+
+	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
+								   "axi_m_sticky");
+	if (IS_ERR(res->axi_m_sticky_reset))
+		return PTR_ERR(res->axi_m_sticky_reset);
+
+	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
+								  "pipe_sticky");
+	if (IS_ERR(res->pipe_sticky_reset))
+		return PTR_ERR(res->pipe_sticky_reset);
+
+	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
+	if (IS_ERR(res->pwr_reset))
+		return PTR_ERR(res->pwr_reset);
+
+	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	if (is_ipq) {
+		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
+		if (IS_ERR(res->phy_ahb_reset))
+			return PTR_ERR(res->phy_ahb_reset);
+	}
+
+	return 0;
+}
+
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+
+	reset_control_assert(res->axi_m_reset);
+	reset_control_assert(res->axi_s_reset);
+	reset_control_assert(res->pipe_reset);
+	reset_control_assert(res->pipe_sticky_reset);
+	reset_control_assert(res->phy_reset);
+	reset_control_assert(res->phy_ahb_reset);
+	reset_control_assert(res->axi_m_sticky_reset);
+	reset_control_assert(res->pwr_reset);
+	reset_control_assert(res->ahb_reset);
+	clk_bulk_disable_unprepare(res->num_clks, res->clks);
+}
+
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
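+	/* assert all resets first, then release them in PHY -> pipe -> AXI -> power -> AHB order */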
+	ret = reset_control_assert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi slave reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert pipe sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert phy ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_assert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert axi master sticky reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert power reset\n");
+		return ret;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		return ret;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->phy_ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy ahb reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		goto err_rst_phy;
+	}
+
+	ret = reset_control_deassert(res->pipe_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe reset\n");
+		goto err_rst_pipe;
+	}
+
+	ret = reset_control_deassert(res->pipe_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pipe sticky reset\n");
+		goto err_rst_pipe_sticky;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = reset_control_deassert(res->axi_m_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master reset\n");
+		goto err_rst_axi_m;
+	}
+
+	ret = reset_control_deassert(res->axi_m_sticky_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi master sticky reset\n");
+		goto err_rst_axi_m_sticky;
+	}
+
+	ret = reset_control_deassert(res->axi_s_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi slave reset\n");
+		goto err_rst_axi_s;
+	}
+
+	ret = reset_control_deassert(res->pwr_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert power reset\n");
+		goto err_rst_pwr;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_rst_ahb;
+	}
+
+	usleep_range(10000, 12000);
+
+	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+	if (ret)
+		goto err_clks;
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
+err_clks:
+	reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+	reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+	reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+	reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+	reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+	reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+	reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+	reset_control_assert(res->phy_reset);
+err_rst_phy:
+	reset_control_assert(res->phy_ahb_reset);
+	return ret;
+}
+
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int i;
+	const char *rst_names[] = { "axi_m", "axi_s", "pipe",
+				    "axi_m_sticky", "sticky",
+				    "ahb", "sleep", };
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->axi_m_clk = devm_clk_get(dev, "axi_m");
+	if (IS_ERR(res->axi_m_clk))
+		return PTR_ERR(res->axi_m_clk);
+
+	res->axi_s_clk = devm_clk_get(dev, "axi_s");
+	if (IS_ERR(res->axi_s_clk))
+		return PTR_ERR(res->axi_s_clk);
+
+	res->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(res->ahb_clk))
+		return PTR_ERR(res->ahb_clk);
+
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
+		res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
+		if (IS_ERR(res->rst[i]))
+			return PTR_ERR(res->rst[i]);
+	}
+
+	return 0;
+}
+
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+
+	clk_disable_unprepare(res->iface);
+	clk_disable_unprepare(res->axi_m_clk);
+	clk_disable_unprepare(res->axi_s_clk);
+	clk_disable_unprepare(res->ahb_clk);
+	clk_disable_unprepare(res->aux_clk);
+}
+
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int i, ret;
+	u32 val;
+
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+		ret = reset_control_assert(res->rst[i]);
+		if (ret) {
+			dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
+			return ret;
+		}
+	}
+
+	usleep_range(2000, 2500);
+
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
+		ret = reset_control_deassert(res->rst[i]);
+		if (ret) {
+			dev_err(dev, "reset #%d deassert failed (%d)\n", i,
+				ret);
+			return ret;
+		}
+	}
+
+	/*
+	 * There is no way to check that the resets have completed, so
+	 * wait for some time.
+	 */
+	usleep_range(2000, 2500);
+
+	ret = clk_prepare_enable(res->iface);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_iface;
+	}
+
+	ret = clk_prepare_enable(res->axi_m_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_axi_m;
+	}
+
+	ret = clk_prepare_enable(res->axi_s_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable axi slave clock\n");
+		goto err_clk_axi_s;
+	}
+
+	ret = clk_prepare_enable(res->ahb_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable ahb clock\n");
+		goto err_clk_ahb;
+	}
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_clk_aux;
+	}
+
+	writel(SLV_ADDR_SPACE_SZ,
+		pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+		pcie->parf + PCIE20_PARF_SYS_CTRL);
+	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+
+	writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
+	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
+	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+
+	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+	val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
+	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+
+	writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
+		PCIE20_DEVICE_CONTROL2_STATUS2);
+
+	return 0;
+
+err_clk_aux:
+	clk_disable_unprepare(res->ahb_clk);
+err_clk_ahb:
+	clk_disable_unprepare(res->axi_s_clk);
+err_clk_axi_s:
+	clk_disable_unprepare(res->axi_m_clk);
+err_clk_axi_m:
+	clk_disable_unprepare(res->iface);
+err_clk_iface:
+	/*
+	 * Not checking for failure here; the original error is returned
+	 * in 'ret' regardless.
+	 */
+	for (i = 0; i < ARRAY_SIZE(res->rst); i++)
+		reset_control_assert(res->rst[i]);
+
+	return ret;
+}
+
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	int ret;
+
+	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->supplies[0].supply = "vdda";
+	res->supplies[1].supply = "vddpe-3v3";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+				      res->supplies);
+	if (ret)
+		return ret;
+
+	res->clks[0].id = "aux";
+	res->clks[1].id = "cfg";
+	res->clks[2].id = "bus_master";
+	res->clks[3].id = "bus_slave";
+	res->clks[4].id = "slave_q2a";
+	res->clks[5].id = "tbu";
+
+	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		return ret;
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	return PTR_ERR_OR_ZERO(res->pipe_clk);
+}
+
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+	struct dw_pcie *pci = pcie->pci;
+	struct device *dev = pci->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+	if (ret < 0) {
+		dev_err(dev, "cannot enable regulators\n");
+		return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+	if (ret < 0)
+		goto err_disable_regulators;
+
+	ret = reset_control_assert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		goto err_disable_clocks;
+	}
+
+	msleep(10);
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret < 0) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		goto err_assert_resets;
+	}
+
+	ret = clk_prepare_enable(res->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clock\n");
+		goto err_assert_resets;
+	}
+
+	/* configure PCIe to RC mode */
+	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_assert_resets:
+	reset_control_assert(res->pci_reset);
+err_disable_clocks:
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+err_disable_regulators:
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+
+	return ret;
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	return clk_prepare_enable(res->pipe_clk);
+}
+
+static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+	clk_disable_unprepare(res->pipe_clk);
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
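+	/* PCI_EXP_LNKSTA_DLLLA: Data Link Layer Link Active reporting */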
+	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static int qcom_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct qcom_pcie *pcie = to_qcom_pcie(pci);
+	int ret;
+
+	qcom_ep_reset_assert(pcie);
+
+	ret = pcie->ops->init(pcie);
+	if (ret)
+		return ret;
+
+	ret = phy_power_on(pcie->phy);
+	if (ret)
+		goto err_deinit;
+
+	if (pcie->ops->post_init) {
+		ret = pcie->ops->post_init(pcie);
+		if (ret)
+			goto err_disable_phy;
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	qcom_ep_reset_deassert(pcie);
+
+	ret = qcom_pcie_establish_link(pcie);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	qcom_ep_reset_assert(pcie);
+	if (pcie->ops->post_deinit)
+		pcie->ops->post_deinit(pcie);
+err_disable_phy:
+	phy_power_off(pcie->phy);
+err_deinit:
+	pcie->ops->deinit(pcie);
+
+	return ret;
+}
+
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+	.host_init = qcom_pcie_host_init,
+};
+
+/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
+static const struct qcom_pcie_ops ops_2_1_0 = {
+	.get_resources = qcom_pcie_get_resources_2_1_0,
+	.init = qcom_pcie_init_2_1_0,
+	.deinit = qcom_pcie_deinit_2_1_0,
+	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
+static const struct qcom_pcie_ops ops_1_0_0 = {
+	.get_resources = qcom_pcie_get_resources_1_0_0,
+	.init = qcom_pcie_init_1_0_0,
+	.deinit = qcom_pcie_deinit_1_0_0,
+	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
+static const struct qcom_pcie_ops ops_2_3_2 = {
+	.get_resources = qcom_pcie_get_resources_2_3_2,
+	.init = qcom_pcie_init_2_3_2,
+	.post_init = qcom_pcie_post_init_2_3_2,
+	.deinit = qcom_pcie_deinit_2_3_2,
+	.post_deinit = qcom_pcie_post_deinit_2_3_2,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
+static const struct qcom_pcie_ops ops_2_4_0 = {
+	.get_resources = qcom_pcie_get_resources_2_4_0,
+	.init = qcom_pcie_init_2_4_0,
+	.deinit = qcom_pcie_deinit_2_4_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_3_3 = {
+	.get_resources = qcom_pcie_get_resources_2_3_3,
+	.init = qcom_pcie_init_2_3_3,
+	.deinit = qcom_pcie_deinit_2_3_3,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+	.get_resources = qcom_pcie_get_resources_2_7_0,
+	.init = qcom_pcie_init_2_7_0,
+	.deinit = qcom_pcie_deinit_2_7_0,
+	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+	.post_init = qcom_pcie_post_init_2_7_0,
+	.post_deinit = qcom_pcie_post_deinit_2_7_0,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = qcom_pcie_link_up,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct pcie_port *pp;
+	struct dw_pcie *pci;
+	struct qcom_pcie *pcie;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		pm_runtime_disable(dev);
+		return ret;
+	}
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+	pp = &pci->pp;
+
+	pcie->pci = pci;
+
+	pcie->ops = of_device_get_match_data(dev);
+
+	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+	if (IS_ERR(pcie->reset)) {
+		ret = PTR_ERR(pcie->reset);
+		goto err_pm_runtime_put;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+	pcie->parf = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->parf)) {
+		ret = PTR_ERR(pcie->parf);
+		goto err_pm_runtime_put;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pci->dbi_base)) {
+		ret = PTR_ERR(pci->dbi_base);
+		goto err_pm_runtime_put;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+	pcie->elbi = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->elbi)) {
+		ret = PTR_ERR(pcie->elbi);
+		goto err_pm_runtime_put;
+	}
+
+	pcie->phy = devm_phy_optional_get(dev, "pciephy");
+	if (IS_ERR(pcie->phy)) {
+		ret = PTR_ERR(pcie->phy);
+		goto err_pm_runtime_put;
+	}
+
+	ret = pcie->ops->get_resources(pcie);
+	if (ret)
+		goto err_pm_runtime_put;
+
+	pp->ops = &qcom_pcie_dw_ops;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0) {
+			ret = pp->msi_irq;
+			goto err_pm_runtime_put;
+		}
+	}
+
+	ret = phy_init(pcie->phy);
+	if (ret)
+		goto err_pm_runtime_put;
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "cannot initialize host\n");
+		goto err_phy_exit;
+	}
+
+	return 0;
+
+err_phy_exit:
+	phy_exit(pcie->phy);
+err_pm_runtime_put:
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
+	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
+	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
+	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
+	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
+	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
+	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
+	{ }
+};
+
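+/*
+ * The root ports on these SoCs come out of reset with a bogus device
+ * class code, so force it to PCI-to-PCI bridge so the PCI core can
+ * enumerate the devices behind them.
+ */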
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+
+static struct platform_driver qcom_pcie_driver = {
+	.probe = qcom_pcie_probe,
+	.driver = {
+		.name = "qcom-pcie",
+		.suppress_bind_attrs = true,
+		.of_match_table = qcom_pcie_match,
+	},
+};
+builtin_platform_driver(qcom_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-spear13xx.c b/marvell/linux/drivers/pci/controller/dwc/pcie-spear13xx.c
new file mode 100644
index 0000000..7d0cdfd
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@gmail.com>
+ * Mohit Kumar <mohit.kumar.dhaka@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+	struct dw_pcie		*pci;
+	void __iomem		*app_base;
+	struct phy		*phy;
+	struct clk		*clk;
+	bool			is_gen1;
+};
+
+struct pcie_app_reg {
+	u32	app_ctrl_0;		/* cr0 */
+	u32	app_ctrl_1;		/* cr1 */
+	u32	app_status_0;		/* cr2 */
+	u32	app_status_1;		/* cr3 */
+	u32	msg_status;		/* cr4 */
+	u32	msg_payload;		/* cr5 */
+	u32	int_sts;		/* cr6 */
+	u32	int_clr;		/* cr7 */
+	u32	int_mask;		/* cr8 */
+	u32	mst_bmisc;		/* cr9 */
+	u32	phy_ctrl;		/* cr10 */
+	u32	phy_status;		/* cr11 */
+	u32	cxpl_debug_info_0;	/* cr12 */
+	u32	cxpl_debug_info_1;	/* cr13 */
+	u32	ven_msg_ctrl_0;		/* cr14 */
+	u32	ven_msg_ctrl_1;		/* cr15 */
+	u32	ven_msg_data_0;		/* cr16 */
+	u32	ven_msg_data_1;		/* cr17 */
+	u32	ven_msi_0;		/* cr18 */
+	u32	ven_msi_1;		/* cr19 */
+	u32	mst_rmisc;		/* cr20 */
+};
+
+/* CR0 ID */
+#define APP_LTSSM_ENABLE_ID			3
+#define DEVICE_TYPE_RC				(4 << 25)
+#define MISCTRL_EN_ID				30
+#define REG_TRANSLATION_ENABLE			31
+
+/* CR3 ID */
+#define XMLH_LINK_UP				(1 << 6)
+
+/* CR6 */
+#define MSI_CTRL_INT				(1 << 26)
+
+#define EXP_CAP_ID_OFFSET			0x70
+
+#define to_spear13xx_pcie(x)	dev_get_drvdata((x)->dev)
+
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	u32 val;
+	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+	if (dw_pcie_link_up(pci)) {
+		dev_err(pci->dev, "link already up\n");
+		return 0;
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	/*
+	 * This controller supports only a 128 byte read request size, but
+	 * its default value in the capability register is 512 bytes. So
+	 * force it to 128 here.
+	 */
+	dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
+	val &= ~PCI_EXP_DEVCTL_READRQ;
+	dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
+
+	dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
+	dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
+
+	/*
+	 * If is_gen1 is set, cap the link at Gen1 so that some buggy
+	 * cards also work.
+	 */
+	if (spear13xx_pcie->is_gen1) {
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
+			     4, &val);
+		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCAP, 4, val);
+		}
+
+		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
+			     2, &val);
+		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+			val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+			val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+			dw_pcie_write(pci->dbi_base + exp_cap_off +
+				      PCI_EXP_LNKCTL2, 2, val);
+		}
+	}
+
+	/* enable ltssm */
+	writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+			| (1 << APP_LTSSM_ENABLE_ID)
+			| ((u32)1 << REG_TRANSLATION_ENABLE),
+			&app_reg->app_ctrl_0);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+	struct spear13xx_pcie *spear13xx_pcie = arg;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	unsigned int status;
+
+	status = readl(&app_reg->int_sts);
+
+	if (status & MSI_CTRL_INT) {
+		BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
+		dw_handle_msi_irq(pp);
+	}
+
+	writel(status, &app_reg->int_clr);
+
+	return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+	/* Enable MSI interrupt */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		dw_pcie_msi_init(pp);
+		writel(readl(&app_reg->int_mask) |
+				MSI_CTRL_INT, &app_reg->int_mask);
+	}
+}
+
+static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+{
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+	if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+		return 1;
+
+	return 0;
+}
+
+static int spear13xx_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+
+	spear13xx_pcie_establish_link(spear13xx_pcie);
+	spear13xx_pcie_enable_interrupts(spear13xx_pcie);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+	.host_init = spear13xx_pcie_host_init,
+};
+
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+				   struct platform_device *pdev)
+{
+	struct dw_pcie *pci = spear13xx_pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return pp->irq;
+	}
+	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       "spear1340-pcie", spear13xx_pcie);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+
+	pp->ops = &spear13xx_pcie_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.link_up = spear13xx_pcie_link_up,
+};
+
+static int spear13xx_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dw_pcie *pci;
+	struct spear13xx_pcie *spear13xx_pcie;
+	struct device_node *np = dev->of_node;
+	struct resource *dbi_base;
+	int ret;
+
+	spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+	if (!spear13xx_pcie)
+		return -ENOMEM;
+
+	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	if (!pci)
+		return -ENOMEM;
+
+	pci->dev = dev;
+	pci->ops = &dw_pcie_ops;
+
+	spear13xx_pcie->pci = pci;
+
+	spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+	if (IS_ERR(spear13xx_pcie->phy)) {
+		ret = PTR_ERR(spear13xx_pcie->phy);
+		if (ret == -EPROBE_DEFER)
+			dev_info(dev, "probe deferred\n");
+		else
+			dev_err(dev, "couldn't get pcie-phy\n");
+		return ret;
+	}
+
+	phy_init(spear13xx_pcie->phy);
+
+	spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(spear13xx_pcie->clk)) {
+		dev_err(dev, "couldn't get clk for pcie\n");
+		return PTR_ERR(spear13xx_pcie->clk);
+	}
+	ret = clk_prepare_enable(spear13xx_pcie->clk);
+	if (ret) {
+		dev_err(dev, "couldn't enable clk for pcie\n");
+		return ret;
+	}
+
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+	if (IS_ERR(pci->dbi_base)) {
+		dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
+		ret = PTR_ERR(pci->dbi_base);
+		goto fail_clk;
+	}
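+	/* the application (glue) registers sit at a fixed 0x2000 offset from the DBI space */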
+	spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+	if (of_property_read_bool(np, "st,pcie-is-gen1"))
+		spear13xx_pcie->is_gen1 = true;
+
+	platform_set_drvdata(pdev, spear13xx_pcie);
+
+	ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
+	if (ret < 0)
+		goto fail_clk;
+
+	return 0;
+
+fail_clk:
+	clk_disable_unprepare(spear13xx_pcie->clk);
+
+	return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+	{ .compatible = "st,spear1340-pcie", },
+	{},
+};
+
+static struct platform_driver spear13xx_pcie_driver = {
+	.probe		= spear13xx_pcie_probe,
+	.driver = {
+		.name	= "spear-pcie",
+		.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+		.suppress_bind_attrs = true,
+	},
+};
+
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-tegra194.c b/marvell/linux/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index 0000000..1cf9485
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,1728 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Tegra194 SoC
+ *
+ * Copyright (C) 2019 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include "../../pci.h"
+
+#define APPL_PINMUX				0x0
+#define APPL_PINMUX_PEX_RST			BIT(0)
+#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
+#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)
+#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN		BIT(9)
+#define APPL_PINMUX_CLKREQ_OUT_OVRD		BIT(10)
+
+#define APPL_CTRL				0x4
+#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
+#define APPL_CTRL_LTSSM_EN			BIT(7)
+#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
+#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
+#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
+
+#define APPL_INTR_EN_L0_0			0x8
+#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
+#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
+#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
+#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
+#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
+#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)
+
+#define APPL_INTR_STATUS_L0			0xC
+#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
+#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
+#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)
+
+#define APPL_INTR_EN_L1_0_0				0x1C
+#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
+
+#define APPL_INTR_STATUS_L1_0_0				0x20
+#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
+
+#define APPL_INTR_STATUS_L1_1			0x2C
+#define APPL_INTR_STATUS_L1_2			0x30
+#define APPL_INTR_STATUS_L1_3			0x34
+#define APPL_INTR_STATUS_L1_6			0x3C
+#define APPL_INTR_STATUS_L1_7			0x40
+
+#define APPL_INTR_EN_L1_8_0			0x44
+#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
+#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
+#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
+#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)
+
+#define APPL_INTR_STATUS_L1_8_0			0x4C
+#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
+#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
+#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)
+
+#define APPL_INTR_STATUS_L1_9			0x54
+#define APPL_INTR_STATUS_L1_10			0x58
+#define APPL_INTR_STATUS_L1_11			0x64
+#define APPL_INTR_STATUS_L1_13			0x74
+#define APPL_INTR_STATUS_L1_14			0x78
+#define APPL_INTR_STATUS_L1_15			0x7C
+#define APPL_INTR_STATUS_L1_17			0x88
+
+#define APPL_INTR_EN_L1_18				0x90
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
+
+#define APPL_INTR_STATUS_L1_18				0x94
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)
+
+#define APPL_MSI_CTRL_2				0xB0
+
+#define APPL_LTR_MSG_1				0xC4
+#define LTR_MSG_REQ				BIT(15)
+#define LTR_MST_NO_SNOOP_SHIFT			16
+
+#define APPL_LTR_MSG_2				0xC8
+#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)
+
+#define APPL_LINK_STATUS			0xCC
+#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)
+
+#define APPL_DEBUG				0xD0
+#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
+#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
+#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
+#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
+#define LTSSM_STATE_PRE_DETECT			5
+
+#define APPL_RADM_STATUS			0xE4
+#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)
+
+#define APPL_DM_TYPE				0x100
+#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
+#define APPL_DM_TYPE_RP				0x4
+#define APPL_DM_TYPE_EP				0x0
+
+#define APPL_CFG_BASE_ADDR			0x104
+#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)
+
+#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
+#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)
+
+#define APPL_CFG_MISC				0x110
+#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
+#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
+#define APPL_CFG_MISC_ARCACHE_SHIFT		10
+#define APPL_CFG_MISC_ARCACHE_VAL		3
+
+#define APPL_CFG_SLCG_OVERRIDE			0x114
+#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)
+
+#define APPL_CAR_RESET_OVRD				0x12C
+#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)
+
+#define IO_BASE_IO_DECODE				BIT(0)
+#define IO_BASE_IO_DECODE_BIT8				BIT(8)
+
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)
+
+#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF	0x718
+#define CFG_TIMER_CTRL_ACK_NAK_SHIFT	(19)
+
+#define EVENT_COUNTER_ALL_CLEAR		0x3
+#define EVENT_COUNTER_ENABLE_ALL	0x7
+#define EVENT_COUNTER_ENABLE_SHIFT	2
+#define EVENT_COUNTER_EVENT_SEL_MASK	GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT	16
+#define EVENT_COUNTER_EVENT_Tx_L0S	0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S	0x3
+#define EVENT_COUNTER_EVENT_L1		0x5
+#define EVENT_COUNTER_EVENT_L1_1	0x7
+#define EVENT_COUNTER_EVENT_L1_2	0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT	24
+#define EVENT_COUNTER_GROUP_5		0x5
+
+#define PORT_LOGIC_ACK_F_ASPM_CTRL			0x70C
+#define ENTER_ASPM					BIT(30)
+#define L0S_ENTRANCE_LAT_SHIFT				24
+#define L0S_ENTRANCE_LAT_MASK				GENMASK(26, 24)
+#define L1_ENTRANCE_LAT_SHIFT				27
+#define L1_ENTRANCE_LAT_MASK				GENMASK(29, 27)
+#define N_FTS_SHIFT					8
+#define N_FTS_MASK					GENMASK(7, 0)
+#define N_FTS_VAL					52
+
+#define PORT_LOGIC_GEN2_CTRL				0x80C
+#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE	BIT(17)
+#define FTS_MASK					GENMASK(7, 0)
+#define FTS_VAL						52
+
+#define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828
+
+#define GEN3_EQ_CONTROL_OFF			0x8a8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT	8
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK	GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK	GENMASK(3, 0)
+
+#define GEN3_RELATED_OFF			0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL	BIT(0)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE	BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT	24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK	GENMASK(25, 24)
+
+#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
+#define AMBA_ERROR_RESPONSE_CRS_SHIFT		3
+#define AMBA_ERROR_RESPONSE_CRS_MASK		GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_CRS_OKAY		0
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF	1
+#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001	2
+
+#define PORT_LOGIC_MSIX_DOORBELL			0x948
+
+#define CAP_SPCIE_CAP_OFF			0x154
+#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8
+
+#define PME_ACK_TIMEOUT 10000
+
+#define LTSSM_TIMEOUT 50000	/* 50ms */
+
+#define GEN3_GEN4_EQ_PRESET_INIT	5
+
+#define GEN1_CORE_CLK_FREQ	62500000
+#define GEN2_CORE_CLK_FREQ	125000000
+#define GEN3_CORE_CLK_FREQ	250000000
+#define GEN4_CORE_CLK_FREQ	500000000
+
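+/* core clock rates per PCIe generation (Gen1..Gen4) */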
+static const unsigned int pcie_gen_freq[] = {
+	GEN1_CORE_CLK_FREQ,
+	GEN2_CORE_CLK_FREQ,
+	GEN3_CORE_CLK_FREQ,
+	GEN4_CORE_CLK_FREQ
+};
+
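+/* event counter control/data register offsets, indexed by controller id (cid) */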
+static const u32 event_cntr_ctrl_offset[] = {
+	0x1d8,
+	0x1a8,
+	0x1a8,
+	0x1a8,
+	0x1c4,
+	0x1d8
+};
+
+static const u32 event_cntr_data_offset[] = {
+	0x1dc,
+	0x1ac,
+	0x1ac,
+	0x1ac,
+	0x1c8,
+	0x1dc
+};
+
+struct tegra_pcie_dw {
+	struct device *dev;
+	struct resource *appl_res;
+	struct resource *dbi_res;
+	struct resource *atu_dma_res;
+	void __iomem *appl_base;
+	struct clk *core_clk;
+	struct reset_control *core_apb_rst;
+	struct reset_control *core_rst;
+	struct dw_pcie pci;
+	struct tegra_bpmp *bpmp;
+
+	bool supports_clkreq;
+	bool enable_cdm_check;
+	bool link_state;
+	bool update_fc_fixup;
+	u8 init_link_width;
+	u32 msi_ctrl_int;
+	u32 num_lanes;
+	u32 max_speed;
+	u32 cid;
+	u32 cfg_link_cap_l1sub;
+	u32 pcie_cap_base;
+	u32 aspm_cmrt;
+	u32 aspm_pwr_on_t;
+	u32 aspm_l0s_enter_lat;
+
+	struct regulator *pex_ctl_supply;
+	struct regulator *slot_ctl_3v3;
+	struct regulator *slot_ctl_12v;
+
+	unsigned int phy_count;
+	struct phy **phys;
+
+	struct dentry *debugfs;
+};
+
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+{
+	return container_of(pci, struct tegra_pcie_dw, pci);
+}
+
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+			       const u32 reg)
+{
+	writel_relaxed(value, pcie->appl_base + reg);
+}
+
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+{
+	return readl_relaxed(pcie->appl_base + reg);
+}
+
+struct tegra_pcie_soc {
+	enum dw_pcie_device_mode mode;
+};
+
+static void apply_bad_link_workaround(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 current_link_width;
+	u16 val;
+
+	/*
+	 * NOTE: since this scenario is uncommon and the link is not stable
+	 * anyway, we do not wait to confirm whether the link is really
+	 * transitioning to Gen-2 speed.
+	 */
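+	/* Link Bandwidth Management Status: set when the link width or speed has changed */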
+	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+	if (val & PCI_EXP_LNKSTA_LBMS) {
+		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+		if (pcie->init_link_width > current_link_width) {
+			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						PCI_EXP_LNKCTL2);
+			val &= ~PCI_EXP_LNKCTL2_TLS;
+			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+					   PCI_EXP_LNKCTL2, val);
+
+			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						PCI_EXP_LNKCTL);
+			val |= PCI_EXP_LNKCTL_RL;
+			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+					   PCI_EXP_LNKCTL, val);
+		}
+	}
+}
+
+static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	u32 val, status_l0, status_l1;
+	u16 val_w;
+
+	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+		if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
+			/* SBR & Surprise Link Down WAR */
+			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+			udelay(1);
+			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+
+			val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+			val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
+			dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+		}
+	}
+
+	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+			appl_writel(pcie,
+				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
+				    APPL_INTR_STATUS_L1_8_0);
+			apply_bad_link_workaround(pp);
+		}
+		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+			appl_writel(pcie,
+				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
+				    APPL_INTR_STATUS_L1_8_0);
+
+			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+						  PCI_EXP_LNKSTA);
+			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
+				PCI_EXP_LNKSTA_CLS);
+		}
+	}
+
+	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+			dev_info(pci->dev, "CDM check complete\n");
+			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+		}
+		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+			dev_err(pci->dev, "CDM comparison mismatch\n");
+			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+		}
+		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+			dev_err(pci->dev, "CDM Logic error\n");
+			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+		}
+		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
+{
+	struct tegra_pcie_dw *pcie = arg;
+
+	return tegra_pcie_rp_irq_handler(pcie);
+}
+
+static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
+				     u32 *val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/*
+	 * This is an endpoint-mode-specific register that happens to appear
+	 * even when the controller is operating in root port mode, and the
+	 * system hangs when it is accessed with the link in ASPM-L1 state.
+	 * So skip accessing it altogether.
+	 */
+	if (where == PORT_LOGIC_MSIX_DOORBELL) {
+		*val = 0x00000000;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	return dw_pcie_read(pci->dbi_base + where, size, val);
+}
+
+static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
+				     u32 val)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+	/*
+	 * This is an endpoint-mode-specific register that happens to appear
+	 * even when the controller is operating in root port mode, and the
+	 * system hangs when it is accessed with the link in ASPM-L1 state.
+	 * So skip accessing it altogether.
+	 */
+	if (where == PORT_LOGIC_MSIX_DOORBELL)
+		return PCIBIOS_SUCCESSFUL;
+
+	return dw_pcie_write(pci->dbi_base + where, size, val);
+}
+
+#if defined(CONFIG_PCIEASPM)
+static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+	val &= ~PCI_L1SS_CAP_ASPM_L1_1;
+	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
+{
+	u32 val;
+
+	val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
+	val &= ~PCI_L1SS_CAP_ASPM_L1_2;
+	dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
+}
+
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+{
+	u32 val;
+
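+	/* select the event in group 5, enable counting, then read the count back */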
+	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
+	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
+	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
+	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
+
+	return val;
+}
+
+static int aspm_state_cnt(struct seq_file *s, void *data)
+{
+	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
+				     dev_get_drvdata(s->private);
+	u32 val;
+
+	seq_printf(s, "Tx L0s entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
+
+	seq_printf(s, "Rx L0s entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
+
+	seq_printf(s, "Link L1 entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
+
+	seq_printf(s, "Link L1.1 entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
+
+	seq_printf(s, "Link L1.2 entry count : %u\n",
+		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
+
+	/* Clear all counters */
+	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
+			   EVENT_COUNTER_ALL_CLEAR);
+
+	/* Re-enable counting */
+	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
+
+	return 0;
+}
+
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	u32 val;
+
+	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+
+	/* Enable ASPM counters */
+	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
+
+	/* Program T_cmrt and T_pwr_on values */
+	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
+	val |= (pcie->aspm_cmrt << 8);
+	val |= (pcie->aspm_pwr_on_t << 19);
+	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+
+	/* Program L0s and L1 entrance latencies */
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+	val &= ~L0S_ENTRANCE_LAT_MASK;
+	val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
+	val |= ENTER_ASPM;
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+}
+
+static int init_debugfs(struct tegra_pcie_dw *pcie)
+{
+	struct dentry *d;
+
+	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
+					pcie->debugfs, aspm_state_cnt);
+	if (IS_ERR_OR_NULL(d))
+		dev_err(pcie->dev,
+			"Failed to create debugfs file \"aspm_state_cnt\"\n");
+
+	return 0;
+}
+#else
+static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
+#endif
+
+static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+	u16 val_w;
+
+	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+	if (pcie->enable_cdm_check) {
+		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
+		appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
+		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
+		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
+		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
+	}
+
+	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+				  PCI_EXP_LNKSTA);
+	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+				  PCI_EXP_LNKCTL);
+	val_w |= PCI_EXP_LNKCTL_LBMIE;
+	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
+			   val_w);
+}
+
+static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+
+	/* Enable legacy interrupt generation */
+	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
+	val |= APPL_INTR_EN_L1_8_INTX_EN;
+	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
+	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
+	if (IS_ENABLED(CONFIG_PCIEAER))
+		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
+}
+
+static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+
+	dw_pcie_msi_init(pp);
+
+	/* Enable MSI interrupt generation */
+	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
+	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
+	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+}
+
+static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+	/* Clear interrupt statuses before enabling interrupts */
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+	tegra_pcie_enable_system_interrupts(pp);
+	tegra_pcie_enable_legacy_interrupts(pp);
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		tegra_pcie_enable_msi_interrupts(pp);
+}
+
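+/*
+ * Program identical Gen3/Gen4 equalization TX presets for every lane, first
+ * through the Secondary PCIe capability (8 GT/s) lane control registers and
+ * then through the Physical Layer 16 GT/s capability, and finally set the
+ * per-data-rate preset request vectors in GEN3_EQ_CONTROL via the rate
+ * shadow select in GEN3_RELATED_OFF.
+ */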
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	u32 val, offset, i;
+
+	/* Program init preset */
+	for (i = 0; i < pcie->num_lanes; i++) {
+		dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
+				 + (i * 2), 2, &val);
+		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
+		val |= GEN3_GEN4_EQ_PRESET_INIT;
+		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
+		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
+		dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
+				 + (i * 2), 2, val);
+
+		offset = dw_pcie_find_ext_capability(pci,
+						     PCI_EXT_CAP_ID_PL_16GT) +
+				PCI_PL_16GT_LE_CTRL;
+		dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
+		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
+		val |= GEN3_GEN4_EQ_PRESET_INIT;
+		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
+		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
+		dw_pcie_write(pci->dbi_base + offset + i, 1, val);
+	}
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
+	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
+	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+}
+
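+/*
+ * Per-attempt root port setup: program decode, FTS, CRS response and link
+ * capability fields in the DBI space, apply the equalization and ASPM
+ * settings, let the DWC core set up RC defaults, and then sequence PERST#
+ * around enabling the LTSSM so the endpoint sees a clean reset before link
+ * training starts.
+ */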
+static void tegra_pcie_prepare_host(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val;
+
+	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+
+	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
+	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
+	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
+	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
+
+	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+	/* Configure FTS */
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
+	val &= ~(N_FTS_MASK << N_FTS_SHIFT);
+	val |= N_FTS_VAL << N_FTS_SHIFT;
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
+	val &= ~FTS_MASK;
+	val |= FTS_VAL;
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+
+	/* Return 0xFFFF0001 for config reads completed with CRS status */
+	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
+	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
+	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
+		AMBA_ERROR_RESPONSE_CRS_SHIFT);
+	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
+
+	/* Configure Max Speed from DT */
+	if (pcie->max_speed && pcie->max_speed != -EINVAL) {
+		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
+					PCI_EXP_LNKCAP);
+		val &= ~PCI_EXP_LNKCAP_SLS;
+		val |= pcie->max_speed;
+		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
+				   val);
+	}
+
+	/* Configure Max lane width from DT */
+	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
+	val &= ~PCI_EXP_LNKCAP_MLW;
+	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
+	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
+
+	config_gen3_gen4_eq_presets(pcie);
+
+	init_host_aspm(pcie);
+
+	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+	if (pcie->update_fc_fixup) {
+		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+	}
+
+	dw_pcie_setup_rc(pp);
+
+	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+	/* Assert RST */
+	val = appl_readl(pcie, APPL_PINMUX);
+	val &= ~APPL_PINMUX_PEX_RST;
+	appl_writel(pcie, val, APPL_PINMUX);
+
+	usleep_range(100, 200);
+
+	/* Enable LTSSM */
+	val = appl_readl(pcie, APPL_CTRL);
+	val |= APPL_CTRL_LTSSM_EN;
+	appl_writel(pcie, val, APPL_CTRL);
+
+	/* De-assert RST */
+	val = appl_readl(pcie, APPL_PINMUX);
+	val |= APPL_PINMUX_PEX_RST;
+	appl_writel(pcie, val, APPL_PINMUX);
+
+	msleep(100);
+}
+
+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val, tmp, offset, speed;
+
+	tegra_pcie_prepare_host(pp);
+
+	if (dw_pcie_wait_for_link(pci)) {
+		/*
+		 * Some endpoints can't bring the link up if the root port
+		 * has the Data Link Feature (DLF) enabled. Refer to PCIe
+		 * spec rev 4.0 ver 1.0, sec 3.4.2 & 7.7.4 for more
+		 * information on Scaled Flow Control and DLF. So confirm
+		 * that this is indeed the case here and attempt link up
+		 * once again with DLF disabled.
+		 */
+		val = appl_readl(pcie, APPL_DEBUG);
+		val &= APPL_DEBUG_LTSSM_STATE_MASK;
+		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
+		tmp = appl_readl(pcie, APPL_LINK_STATUS);
+		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
+		if (!(val == 0x11 && !tmp)) {
+			/* Link is down for all good reasons */
+			return 0;
+		}
+
+		dev_info(pci->dev, "Link is down in DLL\n");
+		dev_info(pci->dev, "Trying again with DLFE disabled\n");
+		/* Disable LTSSM */
+		val = appl_readl(pcie, APPL_CTRL);
+		val &= ~APPL_CTRL_LTSSM_EN;
+		appl_writel(pcie, val, APPL_CTRL);
+
+		reset_control_assert(pcie->core_rst);
+		reset_control_deassert(pcie->core_rst);
+
+		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
+		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
+		val &= ~PCI_DLF_EXCHANGE_ENABLE;
+		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
+
+		tegra_pcie_prepare_host(pp);
+
+		if (dw_pcie_wait_for_link(pci))
+			return 0;
+	}
+
+	speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+		PCI_EXP_LNKSTA_CLS;
+	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
+
+	tegra_pcie_enable_interrupts(pp);
+
+	return 0;
+}
+
+static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+{
+	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
+{
+	pp->num_vectors = MAX_MSI_IRQS;
+}
+
+static const struct dw_pcie_ops tegra_dw_pcie_ops = {
+	.link_up = tegra_pcie_dw_link_up,
+};
+
+static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+	.rd_own_conf = tegra_pcie_dw_rd_own_conf,
+	.wr_own_conf = tegra_pcie_dw_wr_own_conf,
+	.host_init = tegra_pcie_dw_host_init,
+	.set_num_vectors = tegra_pcie_set_msi_vec_num,
+};
+
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+{
+	unsigned int phy_count = pcie->phy_count;
+
+	while (phy_count--) {
+		phy_power_off(pcie->phys[phy_count]);
+		phy_exit(pcie->phys[phy_count]);
+	}
+}
+
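+/*
+ * Initialize and power on all P2U PHYs. The error path unwinds exactly what
+ * was brought up: a phy_power_on() failure jumps to the label inside the
+ * unwind loop so the just-initialized PHY is only exited, and the
+ * while (i--) loop then both powers off and exits every earlier PHY.
+ */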
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < pcie->phy_count; i++) {
+		ret = phy_init(pcie->phys[i]);
+		if (ret < 0)
+			goto phy_power_off;
+
+		ret = phy_power_on(pcie->phys[i]);
+		if (ret < 0)
+			goto phy_exit;
+	}
+
+	return 0;
+
+phy_power_off:
+	while (i--) {
+		phy_power_off(pcie->phys[i]);
+phy_exit:
+		phy_exit(pcie->phys[i]);
+	}
+
+	return ret;
+}
+
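+/*
+ * Parse the controller properties from the device tree. An illustrative
+ * (not authoritative) fragment of the node this expects, with made-up
+ * values:
+ *
+ *   pcie@14160000 {
+ *           compatible = "nvidia,tegra194-pcie";
+ *           num-lanes = <4>;
+ *           nvidia,bpmp = <&bpmp 4>;    // cell 1 is the controller ID
+ *           nvidia,aspm-cmrt-us = <60>;
+ *           nvidia,aspm-pwr-on-t-us = <20>;
+ *           nvidia,aspm-l0s-entrance-latency-us = <3>;
+ *           phy-names = "p2u-0", "p2u-1", "p2u-2", "p2u-3";
+ *           supports-clkreq;
+ *   };
+ */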
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+{
+	struct device_node *np = pcie->dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
+	if (ret < 0) {
+		dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
+				   &pcie->aspm_pwr_on_t);
+	if (ret < 0)
+		dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
+			 ret);
+
+	ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
+				   &pcie->aspm_l0s_enter_lat);
+	if (ret < 0)
+		dev_info(pcie->dev,
+			 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
+
+	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
+		return ret;
+	}
+
+	pcie->max_speed = of_pci_get_max_link_speed(np);
+
+	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
+		return ret;
+	}
+
+	ret = of_property_count_strings(np, "phy-names");
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
+			ret);
+		return ret;
+	}
+	pcie->phy_count = ret;
+
+	if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
+		pcie->update_fc_fixup = true;
+
+	pcie->supports_clkreq =
+		of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
+
+	pcie->enable_cdm_check =
+		of_property_read_bool(np, "snps,enable-cdm-check");
+
+	return 0;
+}
+
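+/*
+ * Ask the BPMP firmware, via an MRQ_UPHY request, to bring the UPHY lanes
+ * for this controller up or down. The request/response pair is carried by a
+ * single synchronous tegra_bpmp_transfer() call.
+ */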
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+					  bool enable)
+{
+	struct mrq_uphy_response resp;
+	struct tegra_bpmp_message msg;
+	struct mrq_uphy_request req;
+
+	/* Controller-5 doesn't need to have its state set by BPMP-FW */
+	if (pcie->cid == 5)
+		return 0;
+
+	memset(&req, 0, sizeof(req));
+	memset(&resp, 0, sizeof(resp));
+
+	req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+	req.controller_state.pcie_controller = pcie->cid;
+	req.controller_state.enable = enable;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_UPHY;
+	msg.tx.data = &req;
+	msg.tx.size = sizeof(req);
+	msg.rx.data = &resp;
+	msg.rx.size = sizeof(resp);
+
+	return tegra_bpmp_transfer(pcie->bpmp, &msg);
+}
+
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+{
+	struct pcie_port *pp = &pcie->pci.pp;
+	struct pci_bus *child, *root_bus = NULL;
+	struct pci_dev *pdev;
+
+	/*
+	 * With some endpoints, the link doesn't go into the L2 state on
+	 * Tegra unless they are in the D0 state. So make sure that the
+	 * immediate downstream devices are in D0 before sending PME_Turn_Off
+	 * to put the link into L2. This is as per PCI Express Base r4.0
+	 * v1.0, September 27 2017, sec 5.2 Link State Power Management
+	 * (page 428).
+	 */
+
+	list_for_each_entry(child, &pp->root_bus->children, node) {
+		/* Find the bus immediately downstream of the root port */
+		if (child->parent == pp->root_bus) {
+			root_bus = child;
+			break;
+		}
+	}
+
+	if (!root_bus) {
+		dev_err(pcie->dev, "Failed to find downstream devices\n");
+		return;
+	}
+
+	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+		if (PCI_SLOT(pdev->devfn) == 0) {
+			if (pci_set_power_state(pdev, PCI_D0))
+				dev_err(pcie->dev,
+					"Failed to transition %s to D0 state\n",
+					dev_name(&pdev->dev));
+		}
+	}
+}
+
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+	pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
+	if (IS_ERR(pcie->slot_ctl_3v3)) {
+		if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
+			return PTR_ERR(pcie->slot_ctl_3v3);
+
+		pcie->slot_ctl_3v3 = NULL;
+	}
+
+	pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
+	if (IS_ERR(pcie->slot_ctl_12v)) {
+		if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
+			return PTR_ERR(pcie->slot_ctl_12v);
+
+		pcie->slot_ctl_12v = NULL;
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+	int ret;
+
+	if (pcie->slot_ctl_3v3) {
+		ret = regulator_enable(pcie->slot_ctl_3v3);
+		if (ret < 0) {
+			dev_err(pcie->dev,
+				"Failed to enable 3.3V slot supply: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (pcie->slot_ctl_12v) {
+		ret = regulator_enable(pcie->slot_ctl_12v);
+		if (ret < 0) {
+			dev_err(pcie->dev,
+				"Failed to enable 12V slot supply: %d\n", ret);
+			goto fail_12v_enable;
+		}
+	}
+
+	/*
+	 * According to PCI Express Card Electromechanical Specification
+	 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
+	 * should be a minimum of 100ms.
+	 */
+	if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
+		msleep(100);
+
+	return 0;
+
+fail_12v_enable:
+	if (pcie->slot_ctl_3v3)
+		regulator_disable(pcie->slot_ctl_3v3);
+	return ret;
+}
+
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+	if (pcie->slot_ctl_12v)
+		regulator_disable(pcie->slot_ctl_12v);
+	if (pcie->slot_ctl_3v3)
+		regulator_disable(pcie->slot_ctl_3v3);
+}
+
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+					bool en_hw_hot_rst)
+{
+	int ret;
+	u32 val;
+
+	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+	if (ret) {
+		dev_err(pcie->dev,
+			"Failed to enable controller %u: %d\n", pcie->cid, ret);
+		return ret;
+	}
+
+	ret = tegra_pcie_enable_slot_regulators(pcie);
+	if (ret < 0)
+		goto fail_slot_reg_en;
+
+	ret = regulator_enable(pcie->pex_ctl_supply);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
+		goto fail_reg_en;
+	}
+
+	ret = clk_prepare_enable(pcie->core_clk);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
+		goto fail_core_clk;
+	}
+
+	ret = reset_control_deassert(pcie->core_apb_rst);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
+			ret);
+		goto fail_core_apb_rst;
+	}
+
+	if (en_hw_hot_rst) {
+		/* Enable HW_HOT_RST mode */
+		val = appl_readl(pcie, APPL_CTRL);
+		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+		val |= APPL_CTRL_HW_HOT_RST_EN;
+		appl_writel(pcie, val, APPL_CTRL);
+	}
+
+	ret = tegra_pcie_enable_phy(pcie);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
+		goto fail_phy;
+	}
+
+	/* Update CFG base address */
+	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+		    APPL_CFG_BASE_ADDR);
+
+	/* Configure this core for RP mode operation */
+	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
+
+	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+	val = appl_readl(pcie, APPL_CTRL);
+	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
+
+	val = appl_readl(pcie, APPL_CFG_MISC);
+	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+	appl_writel(pcie, val, APPL_CFG_MISC);
+
+	if (!pcie->supports_clkreq) {
+		val = appl_readl(pcie, APPL_PINMUX);
+		val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
+		val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
+		appl_writel(pcie, val, APPL_PINMUX);
+	}
+
+	/* Update iATU_DMA base address */
+	appl_writel(pcie,
+		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+		    APPL_CFG_IATU_DMA_BASE_ADDR);
+
+	reset_control_deassert(pcie->core_rst);
+
+	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+						      PCI_CAP_ID_EXP);
+
+	/* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
+	if (!pcie->supports_clkreq) {
+		disable_aspm_l11(pcie);
+		disable_aspm_l12(pcie);
+	}
+
+	return ret;
+
+fail_phy:
+	reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+	clk_disable_unprepare(pcie->core_clk);
+fail_core_clk:
+	regulator_disable(pcie->pex_ctl_supply);
+fail_reg_en:
+	tegra_pcie_disable_slot_regulators(pcie);
+fail_slot_reg_en:
+	tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+
+	return ret;
+}
+
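+/*
+ * Tear down everything tegra_pcie_config_controller() brought up, in
+ * reverse order: core reset, PHYs, APB reset, core clock, regulators, and
+ * finally the BPMP controller state.
+ */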
+static int __deinit_controller(struct tegra_pcie_dw *pcie)
+{
+	int ret;
+
+	ret = reset_control_assert(pcie->core_rst);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
+			ret);
+		return ret;
+	}
+
+	tegra_pcie_disable_phy(pcie);
+
+	ret = reset_control_assert(pcie->core_apb_rst);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
+		return ret;
+	}
+
+	clk_disable_unprepare(pcie->core_clk);
+
+	ret = regulator_disable(pcie->pex_ctl_supply);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
+		return ret;
+	}
+
+	tegra_pcie_disable_slot_regulators(pcie);
+
+	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+	if (ret) {
+		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
+			pcie->cid, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+{
+	struct dw_pcie *pci = &pcie->pci;
+	struct pcie_port *pp = &pci->pp;
+	int ret;
+
+	ret = tegra_pcie_config_controller(pcie, false);
+	if (ret < 0)
+		return ret;
+
+	pp->ops = &tegra_pcie_dw_host_ops;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
+		goto fail_host_init;
+	}
+
+	return 0;
+
+fail_host_init:
+	return __deinit_controller(pcie);
+}
+
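+/*
+ * Attempt the PME_Turn_Off handshake: request transmission of the
+ * PME_Turn_Off message and poll APPL_DEBUG until the link settles in L2,
+ * which happens only after the downstream component has replied with
+ * PME_TO_Ack. Returns 0 if the link reached L2 (or was already down),
+ * nonzero on timeout.
+ */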
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+{
+	u32 val;
+
+	if (!tegra_pcie_dw_link_up(&pcie->pci))
+		return 0;
+
+	val = appl_readl(pcie, APPL_RADM_STATUS);
+	val |= APPL_PM_XMT_TURNOFF_STATE;
+	appl_writel(pcie, val, APPL_RADM_STATUS);
+
+	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
+				 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
+				 1, PME_ACK_TIMEOUT);
+}
+
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+{
+	u32 data;
+	int err;
+
+	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+		dev_dbg(pcie->dev, "PCIe link is not up\n");
+		return;
+	}
+
+	if (tegra_pcie_try_link_l2(pcie)) {
+		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
+		/*
+		 * The TX lane clock frequency resets to Gen1 only when the
+		 * link is in the L2 or detect state. So assert PERST# to the
+		 * endpoint to force the root port into the detect state.
+		 */
+		data = appl_readl(pcie, APPL_PINMUX);
+		data &= ~APPL_PINMUX_PEX_RST;
+		appl_writel(pcie, data, APPL_PINMUX);
+
+		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
+						data,
+						((data &
+						APPL_DEBUG_LTSSM_STATE_MASK) >>
+						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+						LTSSM_STATE_PRE_DETECT,
+						1, LTSSM_TIMEOUT);
+		if (err) {
+			dev_info(pcie->dev, "Link didn't go to detect state\n");
+		} else {
+			/* Disable LTSSM after link is in detect state */
+			data = appl_readl(pcie, APPL_CTRL);
+			data &= ~APPL_CTRL_LTSSM_EN;
+			appl_writel(pcie, data, APPL_CTRL);
+		}
+	}
+	/*
+	 * DBI registers may not be accessible after this point, as PLL-E may
+	 * be down depending on how CLKREQ is pulled by the endpoint.
+	 */
+	data = appl_readl(pcie, APPL_PINMUX);
+	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
+	/* Cut REFCLK to slot */
+	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+	appl_writel(pcie, data, APPL_PINMUX);
+}
+
+static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+{
+	tegra_pcie_downstream_dev_to_D0(pcie);
+	dw_pcie_host_deinit(&pcie->pci.pp);
+	tegra_pcie_dw_pme_turnoff(pcie);
+
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+{
+	struct pcie_port *pp = &pcie->pci.pp;
+	struct device *dev = pcie->dev;
+	char *name;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
+		if (pp->msi_irq <= 0) {
+			dev_err(dev, "Failed to get MSI interrupt\n");
+			return -ENODEV;
+		}
+	}
+
+	pm_runtime_enable(dev);
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+			ret);
+		goto fail_pm_get_sync;
+	}
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret < 0) {
+		dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
+		goto fail_pm_get_sync;
+	}
+
+	ret = tegra_pcie_init_controller(pcie);
+	if (ret < 0)
+		goto fail_pm_get_sync;
+
+	pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+	if (!pcie->link_state) {
+		ret = -ENOMEDIUM;
+		goto fail_host_init;
+	}
+
+	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+	if (!name) {
+		ret = -ENOMEM;
+		goto fail_host_init;
+	}
+
+	pcie->debugfs = debugfs_create_dir(name, NULL);
+	if (!pcie->debugfs)
+		dev_err(dev, "Failed to create debugfs\n");
+	else
+		init_debugfs(pcie);
+
+	return ret;
+
+fail_host_init:
+	tegra_pcie_deinit_controller(pcie);
+fail_pm_get_sync:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *atu_dma_res;
+	struct tegra_pcie_dw *pcie;
+	struct resource *dbi_res;
+	struct pcie_port *pp;
+	struct dw_pcie *pci;
+	struct phy **phys;
+	char *name;
+	int ret;
+	u32 i;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pci = &pcie->pci;
+	pci->dev = &pdev->dev;
+	pci->ops = &tegra_dw_pcie_ops;
+	pp = &pci->pp;
+	pcie->dev = &pdev->dev;
+
+	ret = tegra_pcie_dw_parse_dt(pcie);
+	if (ret < 0) {
+		dev_err(dev, "Failed to parse device tree: %d\n", ret);
+		return ret;
+	}
+
+	ret = tegra_pcie_get_slot_regulators(pcie);
+	if (ret < 0) {
+		dev_err(dev, "Failed to get slot regulators: %d\n", ret);
+		return ret;
+	}
+
+	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
+	if (IS_ERR(pcie->pex_ctl_supply)) {
+		ret = PTR_ERR(pcie->pex_ctl_supply);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get regulator: %ld\n",
+				PTR_ERR(pcie->pex_ctl_supply));
+		return ret;
+	}
+
+	pcie->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(pcie->core_clk)) {
+		dev_err(dev, "Failed to get core clock: %ld\n",
+			PTR_ERR(pcie->core_clk));
+		return PTR_ERR(pcie->core_clk);
+	}
+
+	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						      "appl");
+	if (!pcie->appl_res) {
+		dev_err(dev, "Failed to find \"appl\" region\n");
+		return -ENODEV;
+	}
+
+	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
+	if (IS_ERR(pcie->appl_base))
+		return PTR_ERR(pcie->appl_base);
+
+	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
+	if (IS_ERR(pcie->core_apb_rst)) {
+		dev_err(dev, "Failed to get APB reset: %ld\n",
+			PTR_ERR(pcie->core_apb_rst));
+		return PTR_ERR(pcie->core_apb_rst);
+	}
+
+	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
+	if (!phys)
+		return -ENOMEM;
+
+	for (i = 0; i < pcie->phy_count; i++) {
+		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
+		if (!name) {
+			dev_err(dev, "Failed to create P2U string\n");
+			return -ENOMEM;
+		}
+		phys[i] = devm_phy_get(dev, name);
+		kfree(name);
+		if (IS_ERR(phys[i])) {
+			ret = PTR_ERR(phys[i]);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get PHY: %d\n", ret);
+			return ret;
+		}
+	}
+
+	pcie->phys = phys;
+
+	dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	if (!dbi_res) {
+		dev_err(dev, "Failed to find \"dbi\" region\n");
+		return -ENODEV;
+	}
+	pcie->dbi_res = dbi_res;
+
+	pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
+	if (IS_ERR(pci->dbi_base))
+		return PTR_ERR(pci->dbi_base);
+
+	/* Tegra HW locates DBI2 at a fixed offset from DBI */
+	pci->dbi_base2 = pci->dbi_base + 0x1000;
+
+	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "atu_dma");
+	if (!atu_dma_res) {
+		dev_err(dev, "Failed to find \"atu_dma\" region\n");
+		return -ENODEV;
+	}
+	pcie->atu_dma_res = atu_dma_res;
+
+	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
+	if (IS_ERR(pci->atu_base))
+		return PTR_ERR(pci->atu_base);
+
+	pcie->core_rst = devm_reset_control_get(dev, "core");
+	if (IS_ERR(pcie->core_rst)) {
+		dev_err(dev, "Failed to get core reset: %ld\n",
+			PTR_ERR(pcie->core_rst));
+		return PTR_ERR(pcie->core_rst);
+	}
+
+	pp->irq = platform_get_irq_byname(pdev, "intr");
+	if (pp->irq <= 0) {
+		dev_err(dev, "Failed to get \"intr\" interrupt\n");
+		return -ENODEV;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
+			       IRQF_SHARED, "tegra-pcie-intr", pcie);
+	if (ret) {
+		dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
+		return ret;
+	}
+
+	pcie->bpmp = tegra_bpmp_get(dev);
+	if (IS_ERR(pcie->bpmp))
+		return PTR_ERR(pcie->bpmp);
+
+	platform_set_drvdata(pdev, pcie);
+
+	ret = tegra_pcie_config_rp(pcie);
+	if (ret && ret != -ENOMEDIUM)
+		goto fail;
+	else
+		return 0;
+
+fail:
+	tegra_bpmp_put(pcie->bpmp);
+	return ret;
+}
+
+static int tegra_pcie_dw_remove(struct platform_device *pdev)
+{
+	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+	if (!pcie->link_state)
+		return 0;
+
+	debugfs_remove_recursive(pcie->debugfs);
+	tegra_pcie_deinit_controller(pcie);
+	pm_runtime_put_sync(pcie->dev);
+	pm_runtime_disable(pcie->dev);
+	tegra_bpmp_put(pcie->bpmp);
+
+	return 0;
+}
+
+static int tegra_pcie_dw_suspend_late(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+	u32 val;
+
+	if (!pcie->link_state)
+		return 0;
+
+	/* Enable HW_HOT_RST mode */
+	val = appl_readl(pcie, APPL_CTRL);
+	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+	val |= APPL_CTRL_HW_HOT_RST_EN;
+	appl_writel(pcie, val, APPL_CTRL);
+
+	return 0;
+}
+
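+/*
+ * The controller is fully deinitialized across suspend, so save the DWC MSI
+ * interrupt enable register here; the noirq resume path restores it once
+ * the controller has been brought back up.
+ */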
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+
+	if (!pcie->link_state)
+		return 0;
+
+	/* Save MSI interrupt vector */
+	pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
+					       PORT_LOGIC_MSI_CTRL_INT_0_EN);
+	tegra_pcie_downstream_dev_to_D0(pcie);
+	tegra_pcie_dw_pme_turnoff(pcie);
+
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	if (!pcie->link_state)
+		return 0;
+
+	ret = tegra_pcie_config_controller(pcie, true);
+	if (ret < 0)
+		return ret;
+
+	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+	if (ret < 0) {
+		dev_err(dev, "Failed to init host: %d\n", ret);
+		goto fail_host_init;
+	}
+
+	/* Restore MSI interrupt vector */
+	dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
+			   pcie->msi_ctrl_int);
+
+	return 0;
+
+fail_host_init:
+	return __deinit_controller(pcie);
+}
+
+static int tegra_pcie_dw_resume_early(struct device *dev)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+	u32 val;
+
+	if (!pcie->link_state)
+		return 0;
+
+	/* Disable HW_HOT_RST mode */
+	val = appl_readl(pcie, APPL_CTRL);
+	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+	val &= ~APPL_CTRL_HW_HOT_RST_EN;
+	appl_writel(pcie, val, APPL_CTRL);
+
+	return 0;
+}
+
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+{
+	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+	if (!pcie->link_state)
+		return;
+
+	debugfs_remove_recursive(pcie->debugfs);
+	tegra_pcie_downstream_dev_to_D0(pcie);
+
+	disable_irq(pcie->pci.pp.irq);
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		disable_irq(pcie->pci.pp.msi_irq);
+
+	tegra_pcie_dw_pme_turnoff(pcie);
+	__deinit_controller(pcie);
+}
+
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
+	{
+		.compatible = "nvidia,tegra194-pcie",
+	},
+	{},
+};
+
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+	.suspend_late = tegra_pcie_dw_suspend_late,
+	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
+	.resume_noirq = tegra_pcie_dw_resume_noirq,
+	.resume_early = tegra_pcie_dw_resume_early,
+};
+
+static struct platform_driver tegra_pcie_dw_driver = {
+	.probe = tegra_pcie_dw_probe,
+	.remove = tegra_pcie_dw_remove,
+	.shutdown = tegra_pcie_dw_shutdown,
+	.driver = {
+		.name	= "tegra194-pcie",
+		.pm = &tegra_pcie_dw_pm_ops,
+		.of_match_table = tegra_pcie_dw_of_match,
+	},
+};
+module_platform_driver(tegra_pcie_dw_driver);
+
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+
+MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/dwc/pcie-uniphier.c b/marvell/linux/drivers/pci/controller/dwc/pcie-uniphier.c
new file mode 100644
index 0000000..3f30ee4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define PCL_PINCTRL0			0x002c
+#define PCL_PERST_PLDN_REGEN		BIT(12)
+#define PCL_PERST_NOE_REGEN		BIT(11)
+#define PCL_PERST_OUT_REGEN		BIT(8)
+#define PCL_PERST_PLDN_REGVAL		BIT(4)
+#define PCL_PERST_NOE_REGVAL		BIT(3)
+#define PCL_PERST_OUT_REGVAL		BIT(0)
+
+#define PCL_PIPEMON			0x0044
+#define PCL_PCLK_ALIVE			BIT(15)
+
+#define PCL_APP_READY_CTRL		0x8008
+#define PCL_APP_LTSSM_ENABLE		BIT(0)
+
+#define PCL_APP_PM0			0x8078
+#define PCL_SYS_AUX_PWR_DET		BIT(8)
+
+#define PCL_RCV_INT			0x8108
+#define PCL_RCV_INT_ALL_ENABLE		GENMASK(20, 17)
+#define PCL_CFG_BW_MGT_STATUS		BIT(4)
+#define PCL_CFG_LINK_AUTO_BW_STATUS	BIT(3)
+#define PCL_CFG_AER_RC_ERR_MSI_STATUS	BIT(2)
+#define PCL_CFG_PME_MSI_STATUS		BIT(1)
+
+#define PCL_RCV_INTX			0x810c
+#define PCL_RCV_INTX_ALL_ENABLE		GENMASK(19, 16)
+#define PCL_RCV_INTX_ALL_MASK		GENMASK(11, 8)
+#define PCL_RCV_INTX_MASK_SHIFT		8
+#define PCL_RCV_INTX_ALL_STATUS		GENMASK(3, 0)
+#define PCL_RCV_INTX_STATUS_SHIFT	0
+
+#define PCL_STATUS_LINK			0x8140
+#define PCL_RDLH_LINK_UP		BIT(1)
+#define PCL_XMLH_LINK_UP		BIT(0)
+
+struct uniphier_pcie_priv {
+	void __iomem *base;
+	struct dw_pcie pci;
+	struct clk *clk;
+	struct reset_control *rst;
+	struct phy *phy;
+	struct irq_domain *legacy_irq_domain;
+};
+
+#define to_uniphier_pcie(x)	dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+				       bool enable)
+{
+	u32 val;
+
+	val = readl(priv->base + PCL_APP_READY_CTRL);
+	if (enable)
+		val |= PCL_APP_LTSSM_ENABLE;
+	else
+		val &= ~PCL_APP_LTSSM_ENABLE;
+	writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+{
+	u32 val;
+
+	/* use auxiliary power detection */
+	val = readl(priv->base + PCL_APP_PM0);
+	val |= PCL_SYS_AUX_PWR_DET;
+	writel(val, priv->base + PCL_APP_PM0);
+
+	/* assert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+		 | PCL_PERST_PLDN_REGVAL);
+	val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+		| PCL_PERST_PLDN_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+
+	uniphier_pcie_ltssm_enable(priv, false);
+
+	usleep_range(100000, 200000);
+
+	/* deassert PERST# */
+	val = readl(priv->base + PCL_PINCTRL0);
+	val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+	writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
+{
+	u32 status;
+	int ret;
+
+	/* wait for the PIPE clock */
+	ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+				 status & PCL_PCLK_ALIVE, 100000, 1000000);
+	if (ret) {
+		dev_err(priv->pci.dev,
+			"Failed to initialize controller in RC mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pcie_link_up(struct dw_pcie *pci)
+{
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val, mask;
+
+	val = readl(priv->base + PCL_STATUS_LINK);
+	mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
+
+	return (val & mask) == mask;
+}
+
+static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+{
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+
+	if (dw_pcie_link_up(pci))
+		return 0;
+
+	uniphier_pcie_ltssm_enable(priv, true);
+
+	return dw_pcie_wait_for_link(pci);
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+
+	uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
+{
+	writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
+	writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
+{
+	writel(0, priv->base + PCL_RCV_INT);
+	writel(0, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_ack(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val;
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	val &= ~PCL_RCV_INTX_ALL_STATUS;
+	val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
+	writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_mask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val;
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	val &= ~PCL_RCV_INTX_ALL_MASK;
+	val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+	writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static void uniphier_pcie_irq_unmask(struct irq_data *d)
+{
+	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	u32 val;
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	val &= ~PCL_RCV_INTX_ALL_MASK;
+	val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
+	writel(val, priv->base + PCL_RCV_INTX);
+}
+
+static struct irq_chip uniphier_pcie_irq_chip = {
+	.name = "PCI",
+	.irq_ack = uniphier_pcie_irq_ack,
+	.irq_mask = uniphier_pcie_irq_mask,
+	.irq_unmask = uniphier_pcie_irq_unmask,
+};
+
+static int uniphier_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				  irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &uniphier_pcie_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops uniphier_intx_domain_ops = {
+	.map = uniphier_pcie_intx_map,
+};
+
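+/*
+ * Chained handler for the single parent interrupt: miscellaneous events in
+ * PCL_RCV_INT are logged and acknowledged by writing the status bits back,
+ * then every pending INTx bit in PCL_RCV_INTX is dispatched to the matching
+ * virq in the legacy IRQ domain.
+ */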
+static void uniphier_pcie_irq_handler(struct irq_desc *desc)
+{
+	struct pcie_port *pp = irq_desc_get_handler_data(desc);
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long reg;
+	u32 val, bit, virq;
+
+	/* Log miscellaneous interrupt causes for debugging */
+	val = readl(priv->base + PCL_RCV_INT);
+
+	if (val & PCL_CFG_BW_MGT_STATUS)
+		dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
+	if (val & PCL_CFG_LINK_AUTO_BW_STATUS)
+		dev_dbg(pci->dev, "Link Autonomous Bandwidth Event\n");
+	if (val & PCL_CFG_AER_RC_ERR_MSI_STATUS)
+		dev_dbg(pci->dev, "Root Error\n");
+	if (val & PCL_CFG_PME_MSI_STATUS)
+		dev_dbg(pci->dev, "PME Interrupt\n");
+
+	writel(val, priv->base + PCL_RCV_INT);
+
+	/* INTx */
+	chained_irq_enter(chip, desc);
+
+	val = readl(priv->base + PCL_RCV_INTX);
+	reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
+
+	for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
+		virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
+		generic_handle_irq(virq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
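+/*
+ * Wire up legacy INTx handling: the parent interrupt comes from the
+ * "legacy-interrupt-controller" child node, and a linear IRQ domain of
+ * PCI_NUM_INTX entries maps each INTx line onto it via the chained handler
+ * above.
+ */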
+static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	struct device_node *np = pci->dev->of_node;
+	struct device_node *np_intc;
+	int ret = 0;
+
+	np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
+	if (!np_intc) {
+		dev_err(pci->dev, "Failed to get legacy-interrupt-controller node\n");
+		return -EINVAL;
+	}
+
+	pp->irq = irq_of_parse_and_map(np_intc, 0);
+	if (!pp->irq) {
+		dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+	priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+						&uniphier_intx_domain_ops, pp);
+	if (!priv->legacy_irq_domain) {
+		dev_err(pci->dev, "Failed to get INTx domain\n");
+		ret = -ENODEV;
+		goto out_put_node;
+	}
+
+	irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
+					 pp);
+
+out_put_node:
+	of_node_put(np_intc);
+	return ret;
+}
+
+static int uniphier_pcie_host_init(struct pcie_port *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+	int ret;
+
+	ret = uniphier_pcie_config_legacy_irq(pp);
+	if (ret)
+		return ret;
+
+	uniphier_pcie_irq_enable(priv);
+
+	dw_pcie_setup_rc(pp);
+	ret = uniphier_pcie_establish_link(pci);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	return 0;
+}
+
+static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
+	.host_init = uniphier_pcie_host_init,
+};
+
+static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
+				  struct platform_device *pdev)
+{
+	struct dw_pcie *pci = &priv->pci;
+	struct pcie_port *pp = &pci->pp;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->ops = &uniphier_pcie_host_ops;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0)
+			return pp->msi_irq;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	ret = reset_control_deassert(priv->rst);
+	if (ret)
+		goto out_clk_disable;
+
+	uniphier_pcie_init_rc(priv);
+
+	ret = phy_init(priv->phy);
+	if (ret)
+		goto out_rst_assert;
+
+	ret = uniphier_pcie_wait_rc(priv);
+	if (ret)
+		goto out_phy_exit;
+
+	return 0;
+
+out_phy_exit:
+	phy_exit(priv->phy);
+out_rst_assert:
+	reset_control_assert(priv->rst);
+out_clk_disable:
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
+{
+	uniphier_pcie_irq_disable(priv);
+	phy_exit(priv->phy);
+	reset_control_assert(priv->rst);
+	clk_disable_unprepare(priv->clk);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.start_link = uniphier_pcie_establish_link,
+	.stop_link = uniphier_pcie_stop_link,
+	.link_up = uniphier_pcie_link_up,
+};
+
+static int uniphier_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_pcie_priv *priv;
+	struct resource *res;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pci.dev = dev;
+	priv->pci.ops = &dw_pcie_ops;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(priv->pci.dbi_base))
+		return PTR_ERR(priv->pci.dbi_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	priv->rst = devm_reset_control_get_shared(dev, NULL);
+	if (IS_ERR(priv->rst))
+		return PTR_ERR(priv->rst);
+
+	priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+	if (IS_ERR(priv->phy))
+		return PTR_ERR(priv->phy);
+
+	platform_set_drvdata(pdev, priv);
+
+	ret = uniphier_pcie_host_enable(priv);
+	if (ret)
+		return ret;
+
+	return uniphier_add_pcie_port(priv, pdev);
+}
+
+static int uniphier_pcie_remove(struct platform_device *pdev)
+{
+	struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
+
+	uniphier_pcie_host_disable(priv);
+
+	return 0;
+}
+
+static const struct of_device_id uniphier_pcie_match[] = {
+	{ .compatible = "socionext,uniphier-pcie", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
+
+static struct platform_driver uniphier_pcie_driver = {
+	.probe  = uniphier_pcie_probe,
+	.remove = uniphier_pcie_remove,
+	.driver = {
+		.name = "uniphier-pcie",
+		.of_match_table = uniphier_pcie_match,
+	},
+};
+builtin_platform_driver(uniphier_pcie_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pci-aardvark.c b/marvell/linux/drivers/pci/controller/pci-aardvark.c
new file mode 100644
index 0000000..7219ca3
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-aardvark.c
@@ -0,0 +1,1679 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Aardvark PCIe controller, used on Marvell Armada
+ * 3700.
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+
+#include "../pci.h"
+#include "../pci-bridge-emul.h"
+
+/* PCIe core registers */
+#define PCIE_CORE_DEV_ID_REG					0x0
+#define PCIE_CORE_CMD_STATUS_REG				0x4
+#define PCIE_CORE_DEV_REV_REG					0x8
+#define PCIE_CORE_PCIEXP_CAP					0xc0
+#define PCIE_CORE_ERR_CAPCTL_REG				0x118
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX			BIT(5)
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN			BIT(6)
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK			BIT(7)
+#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV			BIT(8)
+#define     PCIE_CORE_INT_A_ASSERT_ENABLE			1
+#define     PCIE_CORE_INT_B_ASSERT_ENABLE			2
+#define     PCIE_CORE_INT_C_ASSERT_ENABLE			3
+#define     PCIE_CORE_INT_D_ASSERT_ENABLE			4
+/* PIO registers base address and register offsets */
+#define PIO_BASE_ADDR				0x4000
+#define PIO_CTRL				(PIO_BASE_ADDR + 0x0)
+#define   PIO_CTRL_TYPE_MASK			GENMASK(3, 0)
+#define   PIO_CTRL_ADDR_WIN_DISABLE		BIT(24)
+#define PIO_STAT				(PIO_BASE_ADDR + 0x4)
+#define   PIO_COMPLETION_STATUS_SHIFT		7
+#define   PIO_COMPLETION_STATUS_MASK		GENMASK(9, 7)
+#define   PIO_COMPLETION_STATUS_OK		0
+#define   PIO_COMPLETION_STATUS_UR		1
+#define   PIO_COMPLETION_STATUS_CRS		2
+#define   PIO_COMPLETION_STATUS_CA		4
+#define   PIO_NON_POSTED_REQ			BIT(10)
+#define   PIO_ERR_STATUS			BIT(11)
+#define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
+#define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
+#define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
+#define PIO_WR_DATA_STRB			(PIO_BASE_ADDR + 0x14)
+#define PIO_RD_DATA				(PIO_BASE_ADDR + 0x18)
+#define PIO_START				(PIO_BASE_ADDR + 0x1c)
+#define PIO_ISR					(PIO_BASE_ADDR + 0x20)
+#define PIO_ISRM				(PIO_BASE_ADDR + 0x24)
+
+/* Aardvark Control registers */
+#define CONTROL_BASE_ADDR			0x4800
+#define PCIE_CORE_CTRL0_REG			(CONTROL_BASE_ADDR + 0x0)
+#define     PCIE_GEN_SEL_MSK			0x3
+#define     PCIE_GEN_SEL_SHIFT			0x0
+#define     SPEED_GEN_1				0
+#define     SPEED_GEN_2				1
+#define     SPEED_GEN_3				2
+#define     IS_RC_MSK				1
+#define     IS_RC_SHIFT				2
+#define     LANE_CNT_MSK			0x18
+#define     LANE_CNT_SHIFT			0x3
+#define     LANE_COUNT_1			(0 << LANE_CNT_SHIFT)
+#define     LANE_COUNT_2			(1 << LANE_CNT_SHIFT)
+#define     LANE_COUNT_4			(2 << LANE_CNT_SHIFT)
+#define     LANE_COUNT_8			(3 << LANE_CNT_SHIFT)
+#define     LINK_TRAINING_EN			BIT(6)
+#define     LEGACY_INTA				BIT(28)
+#define     LEGACY_INTB				BIT(29)
+#define     LEGACY_INTC				BIT(30)
+#define     LEGACY_INTD				BIT(31)
+#define PCIE_CORE_CTRL1_REG			(CONTROL_BASE_ADDR + 0x4)
+#define     HOT_RESET_GEN			BIT(0)
+#define PCIE_CORE_CTRL2_REG			(CONTROL_BASE_ADDR + 0x8)
+#define     PCIE_CORE_CTRL2_RESERVED		0x7
+#define     PCIE_CORE_CTRL2_TD_ENABLE		BIT(4)
+#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	BIT(5)
+#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE	BIT(6)
+#define     PCIE_CORE_CTRL2_MSI_ENABLE		BIT(10)
+#define PCIE_MSG_LOG_REG			(CONTROL_BASE_ADDR + 0x30)
+#define PCIE_ISR0_REG				(CONTROL_BASE_ADDR + 0x40)
+#define PCIE_MSG_PM_PME_MASK			BIT(7)
+#define PCIE_ISR0_MASK_REG			(CONTROL_BASE_ADDR + 0x44)
+#define     PCIE_ISR0_MSI_INT_PENDING		BIT(24)
+#define     PCIE_ISR0_INTX_ASSERT(val)		BIT(16 + (val))
+#define     PCIE_ISR0_INTX_DEASSERT(val)	BIT(20 + (val))
+#define     PCIE_ISR0_ALL_MASK			GENMASK(31, 0)
+#define PCIE_ISR1_REG				(CONTROL_BASE_ADDR + 0x48)
+#define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
+#define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
+#define     PCIE_ISR1_FLUSH			BIT(5)
+#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
+#define     PCIE_ISR1_ALL_MASK			GENMASK(31, 0)
+#define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
+#define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
+#define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
+#define PCIE_MSI_MASK_REG			(CONTROL_BASE_ADDR + 0x5C)
+#define     PCIE_MSI_ALL_MASK			GENMASK(31, 0)
+#define PCIE_MSI_PAYLOAD_REG			(CONTROL_BASE_ADDR + 0x9C)
+#define     PCIE_MSI_DATA_MASK			GENMASK(15, 0)
+
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR			0x4c00
+#define OB_WIN_BLOCK_SIZE			0x20
+#define OB_WIN_COUNT				8
+#define OB_WIN_REG_ADDR(win, offset)		(OB_WIN_BASE_ADDR + \
+						  OB_WIN_BLOCK_SIZE * (win) + \
+						  (offset))
+#define OB_WIN_MATCH_LS(win)			OB_WIN_REG_ADDR(win, 0x00)
+#define     OB_WIN_ENABLE			BIT(0)
+#define OB_WIN_MATCH_MS(win)			OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win)			OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win)			OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win)			OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win)			OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win)			OB_WIN_REG_ADDR(win, 0x18)
+#define OB_WIN_DEFAULT_ACTIONS			(OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
+#define     OB_WIN_FUNC_NUM_MASK		GENMASK(31, 24)
+#define     OB_WIN_FUNC_NUM_SHIFT		24
+#define     OB_WIN_FUNC_NUM_ENABLE		BIT(23)
+#define     OB_WIN_BUS_NUM_BITS_MASK		GENMASK(22, 20)
+#define     OB_WIN_BUS_NUM_BITS_SHIFT		20
+#define     OB_WIN_MSG_CODE_ENABLE		BIT(22)
+#define     OB_WIN_MSG_CODE_MASK		GENMASK(21, 14)
+#define     OB_WIN_MSG_CODE_SHIFT		14
+#define     OB_WIN_MSG_PAYLOAD_LEN		BIT(12)
+#define     OB_WIN_ATTR_ENABLE			BIT(11)
+#define     OB_WIN_ATTR_TC_MASK			GENMASK(10, 8)
+#define     OB_WIN_ATTR_TC_SHIFT		8
+#define     OB_WIN_ATTR_RELAXED			BIT(7)
+#define     OB_WIN_ATTR_NOSNOOP			BIT(6)
+#define     OB_WIN_ATTR_POISON			BIT(5)
+#define     OB_WIN_ATTR_IDO			BIT(4)
+#define     OB_WIN_TYPE_MASK			GENMASK(3, 0)
+#define     OB_WIN_TYPE_SHIFT			0
+#define     OB_WIN_TYPE_MEM			0x0
+#define     OB_WIN_TYPE_IO			0x4
+#define     OB_WIN_TYPE_CONFIG_TYPE0		0x8
+#define     OB_WIN_TYPE_CONFIG_TYPE1		0x9
+#define     OB_WIN_TYPE_MSG			0xc
+
+/* LMI registers base address and register offsets */
+#define LMI_BASE_ADDR				0x6000
+#define CFG_REG					(LMI_BASE_ADDR + 0x0)
+#define     LTSSM_SHIFT				24
+#define     LTSSM_MASK				0x3f
+#define     RC_BAR_CONFIG			0x300
+
+/* LTSSM values in CFG_REG */
+enum {
+	LTSSM_DETECT_QUIET			= 0x0,
+	LTSSM_DETECT_ACTIVE			= 0x1,
+	LTSSM_POLLING_ACTIVE			= 0x2,
+	LTSSM_POLLING_COMPLIANCE		= 0x3,
+	LTSSM_POLLING_CONFIGURATION		= 0x4,
+	LTSSM_CONFIG_LINKWIDTH_START		= 0x5,
+	LTSSM_CONFIG_LINKWIDTH_ACCEPT		= 0x6,
+	LTSSM_CONFIG_LANENUM_ACCEPT		= 0x7,
+	LTSSM_CONFIG_LANENUM_WAIT		= 0x8,
+	LTSSM_CONFIG_COMPLETE			= 0x9,
+	LTSSM_CONFIG_IDLE			= 0xa,
+	LTSSM_RECOVERY_RCVR_LOCK		= 0xb,
+	LTSSM_RECOVERY_SPEED			= 0xc,
+	LTSSM_RECOVERY_RCVR_CFG			= 0xd,
+	LTSSM_RECOVERY_IDLE			= 0xe,
+	LTSSM_L0				= 0x10,
+	LTSSM_RX_L0S_ENTRY			= 0x11,
+	LTSSM_RX_L0S_IDLE			= 0x12,
+	LTSSM_RX_L0S_FTS			= 0x13,
+	LTSSM_TX_L0S_ENTRY			= 0x14,
+	LTSSM_TX_L0S_IDLE			= 0x15,
+	LTSSM_TX_L0S_FTS			= 0x16,
+	LTSSM_L1_ENTRY				= 0x17,
+	LTSSM_L1_IDLE				= 0x18,
+	LTSSM_L2_IDLE				= 0x19,
+	LTSSM_L2_TRANSMIT_WAKE			= 0x1a,
+	LTSSM_DISABLED				= 0x20,
+	LTSSM_LOOPBACK_ENTRY_MASTER		= 0x21,
+	LTSSM_LOOPBACK_ACTIVE_MASTER		= 0x22,
+	LTSSM_LOOPBACK_EXIT_MASTER		= 0x23,
+	LTSSM_LOOPBACK_ENTRY_SLAVE		= 0x24,
+	LTSSM_LOOPBACK_ACTIVE_SLAVE		= 0x25,
+	LTSSM_LOOPBACK_EXIT_SLAVE		= 0x26,
+	LTSSM_HOT_RESET				= 0x27,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE0	= 0x28,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE1	= 0x29,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE2	= 0x2a,
+	LTSSM_RECOVERY_EQUALIZATION_PHASE3	= 0x2b,
+};
+
+#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)
+
+/* PCIe core controller registers */
+#define CTRL_CORE_BASE_ADDR			0x18000
+#define CTRL_CONFIG_REG				(CTRL_CORE_BASE_ADDR + 0x0)
+#define     CTRL_MODE_SHIFT			0x0
+#define     CTRL_MODE_MASK			0x1
+#define     PCIE_CORE_MODE_DIRECT		0x0
+#define     PCIE_CORE_MODE_COMMAND		0x1
+
+/* PCIe Central Interrupts Registers */
+#define CENTRAL_INT_BASE_ADDR			0x1b000
+#define HOST_CTRL_INT_STATUS_REG		(CENTRAL_INT_BASE_ADDR + 0x0)
+#define HOST_CTRL_INT_MASK_REG			(CENTRAL_INT_BASE_ADDR + 0x4)
+#define     PCIE_IRQ_CMDQ_INT			BIT(0)
+#define     PCIE_IRQ_MSI_STATUS_INT		BIT(1)
+#define     PCIE_IRQ_CMD_SENT_DONE		BIT(3)
+#define     PCIE_IRQ_DMA_INT			BIT(4)
+#define     PCIE_IRQ_IB_DXFERDONE		BIT(5)
+#define     PCIE_IRQ_OB_DXFERDONE		BIT(6)
+#define     PCIE_IRQ_OB_RXFERDONE		BIT(7)
+#define     PCIE_IRQ_COMPQ_INT			BIT(12)
+#define     PCIE_IRQ_DIR_RD_DDR_DET		BIT(13)
+#define     PCIE_IRQ_DIR_WR_DDR_DET		BIT(14)
+#define     PCIE_IRQ_CORE_INT			BIT(16)
+#define     PCIE_IRQ_CORE_INT_PIO		BIT(17)
+#define     PCIE_IRQ_DPMU_INT			BIT(18)
+#define     PCIE_IRQ_PCIE_MIS_INT		BIT(19)
+#define     PCIE_IRQ_MSI_INT1_DET		BIT(20)
+#define     PCIE_IRQ_MSI_INT2_DET		BIT(21)
+#define     PCIE_IRQ_RC_DBELL_DET		BIT(22)
+#define     PCIE_IRQ_EP_STATUS			BIT(23)
+#define     PCIE_IRQ_ALL_MASK			GENMASK(31, 0)
+#define     PCIE_IRQ_ENABLE_INTS_MASK		PCIE_IRQ_CORE_INT
+
+/* Transaction types */
+#define PCIE_CONFIG_RD_TYPE0			0x8
+#define PCIE_CONFIG_RD_TYPE1			0x9
+#define PCIE_CONFIG_WR_TYPE0			0xa
+#define PCIE_CONFIG_WR_TYPE1			0xb
+
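+/*
+ * A PIO config request address encodes bus/device/function/register in the
+ * same layout as a PCIe configuration request header: bus number in bits
+ * [27:20], device in [19:15], function in [14:12], and the dword-aligned
+ * register offset in [11:2].
+ */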
+#define PCIE_CONF_BUS(bus)			(((bus) & 0xff) << 20)
+#define PCIE_CONF_DEV(dev)			(((dev) & 0x1f) << 15)
+#define PCIE_CONF_FUNC(fun)			(((fun) & 0x7)	<< 12)
+#define PCIE_CONF_REG(reg)			((reg) & 0xffc)
+#define PCIE_CONF_ADDR(bus, devfn, where)	\
+	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))	| \
+	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
+
+#define PIO_RETRY_CNT			750000 /* 1.5 s */
+#define PIO_RETRY_DELAY			2 /* 2 us */
+
+#define LINK_WAIT_MAX_RETRIES		10
+#define LINK_WAIT_USLEEP_MIN		90000
+#define LINK_WAIT_USLEEP_MAX		100000
+#define RETRAIN_WAIT_MAX_RETRIES	10
+#define RETRAIN_WAIT_USLEEP_US		2000
+
+#define MSI_IRQ_NUM			32
+
+#define CFG_RD_CRS_VAL			0xffff0001
+
+struct advk_pcie {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct list_head resources;
+	struct {
+		phys_addr_t match;
+		phys_addr_t remap;
+		phys_addr_t mask;
+		u32 actions;
+	} wins[OB_WIN_COUNT];
+	u8 wins_count;
+	struct irq_domain *irq_domain;
+	struct irq_chip irq_chip;
+	raw_spinlock_t irq_lock;
+	struct irq_domain *msi_domain;
+	struct irq_domain *msi_inner_domain;
+	struct irq_chip msi_bottom_irq_chip;
+	struct irq_chip msi_irq_chip;
+	struct msi_domain_info msi_domain_info;
+	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
+	struct mutex msi_used_lock;
+	u16 msi_msg;
+	int root_bus_nr;
+	int link_gen;
+	struct pci_bridge_emul bridge;
+	struct gpio_desc *reset_gpio;
+};
+
+static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
+{
+	writel(val, pcie->base + reg);
+}
+
+static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
+{
+	return readl(pcie->base + reg);
+}
+
+static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
+{
+	u32 val;
+	u8 ltssm_state;
+
+	val = advk_readl(pcie, CFG_REG);
+	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+	return ltssm_state;
+}
+
+static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
+{
+	/* check if LTSSM is in normal operation - some L* state */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
+{
+	/*
+	 * According to PCIe Base specification 3.0, Table 4-14 (Link Status
+	 * Mapped to the LTSSM) and section 4.2.6.3.6 (Configuration.Idle),
+	 * the Link is Up in the LTSSM Configuration.Idle, Recovery, L0, L0s,
+	 * L1 and L2 states. And according to section 3.2.1 (Data Link Control
+	 * and Management State Machine Rules), DL Up status is reported while
+	 * in the DL Active state.
+	 */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
+{
+	/*
+	 * According to PCIe Base specification 3.0, Table 4-14 (Link Status
+	 * Mapped to the LTSSM), Link Training is mapped to the LTSSM
+	 * Configuration and Recovery states.
+	 */
+	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+	return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
+		 ltssm_state < LTSSM_L0) ||
+		(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
+		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
+}
+
+static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+{
+	int retries;
+
+	/* check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (advk_pcie_link_up(pcie))
+			return 0;
+
+		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+	size_t retries;
+
+	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+		if (advk_pcie_link_training(pcie))
+			break;
+		udelay(RETRAIN_WAIT_USLEEP_US);
+	}
+}
+
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+	if (!pcie->reset_gpio)
+		return;
+
+	/* 10ms delay is needed for some cards */
+	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+	usleep_range(10000, 11000);
+	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
+static void advk_pcie_train_link(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	u32 reg;
+	int ret;
+
+	/*
+	 * Set up PCIe rev / gen compliance based on the device tree property
+	 * 'max-link-speed', which also forces the maximal link speed.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+	reg &= ~PCIE_GEN_SEL_MSK;
+	if (pcie->link_gen == 3)
+		reg |= SPEED_GEN_3;
+	else if (pcie->link_gen == 2)
+		reg |= SPEED_GEN_2;
+	else
+		reg |= SPEED_GEN_1;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+	/*
+	 * Set the maximal link speed value also in the PCIe Link Control 2
+	 * register. The Armada 3700 Functional Specification says that the
+	 * default value is based on SPEED_GEN, but tests showed that the
+	 * default value is always 8.0 GT/s.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+	reg &= ~PCI_EXP_LNKCTL2_TLS;
+	if (pcie->link_gen == 3)
+		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+	else if (pcie->link_gen == 2)
+		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+	else
+		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+
+	/* Enable link training after selecting PCIe generation */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+	reg |= LINK_TRAINING_EN;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+	/*
+	 * Reset PCIe card via PERST# signal. Some cards are not detected
+	 * during link training when they are in some non-initial state.
+	 */
+	advk_pcie_issue_perst(pcie);
+
+	/*
+	 * The PERST# signal could have been asserted by the pinctrl subsystem
+	 * before the probe() callback was called, or issued explicitly by the
+	 * reset GPIO function advk_pcie_issue_perst(), making the endpoint go
+	 * into fundamental reset. As required by the PCI Express spec (PCI
+	 * Express Base Specification, REV. 4.0 PCI Express, February 19 2014,
+	 * 6.6.1 Conventional Reset), a delay of at least 100ms is needed
+	 * after such a reset before a Configuration Request may be sent to
+	 * the device. So wait until the PCIe link is up. Function
+	 * advk_pcie_wait_for_link() waits for the link for at least 900ms.
+	 */
+	ret = advk_pcie_wait_for_link(pcie);
+	if (ret < 0)
+		dev_err(dev, "link never came up\n");
+	else
+		dev_info(dev, "link up\n");
+}
+
+/*
+ * Configure one outbound PCIe address translation window: accesses whose
+ * address matches 'match' under 'mask' are remapped to 'remap' and handled
+ * according to 'actions'.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
+				 phys_addr_t match, phys_addr_t remap,
+				 phys_addr_t mask, u32 actions)
+{
+	advk_writel(pcie, OB_WIN_ENABLE |
+			  lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
+	advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
+	advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
+	advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
+	advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
+	advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
+	advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
+{
+	advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
+	advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
+	advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
+	advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
+	advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
+	advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
+	advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
+}
+
+static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+{
+	u32 reg;
+	int i;
+
+	/* Set to Direct mode */
+	reg = advk_readl(pcie, CTRL_CONFIG_REG);
+	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
+	reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
+	advk_writel(pcie, reg, CTRL_CONFIG_REG);
+
+	/* Set PCI global control register to RC mode */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+	reg |= (IS_RC_MSK << IS_RC_SHIFT);
+	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+	/*
+	 * Replace the incorrect PCI vendor id value 0x1b4b by the correct
+	 * value 0x11ab. VENDOR_ID_REG contains the vendor id in the low 16
+	 * bits and the subsystem vendor id in the high 16 bits. Updating this
+	 * register changes the readback value of the read-only vendor id bits
+	 * in the PCIE_CORE_DEV_ID_REG register. This is a workaround for
+	 * erratum 4.1: "The value of device and vendor ID is incorrect".
+	 */
+	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+	advk_writel(pcie, reg, VENDOR_ID_REG);
+
+	/*
+	 * Change the Class Code of the PCI Bridge device to PCI Bridge
+	 * (0x600400), because the default value is Mass storage controller
+	 * (0x010400).
+	 *
+	 * Note that this Aardvark PCI Bridge does not have a compliant Type 1
+	 * Configuration Space and it cannot even be accessed via Aardvark's
+	 * PCI config space access method. Something like a config space is
+	 * available in internal Aardvark registers starting at offset 0x0 and
+	 * is reported as Type 0. In the range 0x10 - 0x34 it has totally
+	 * different registers.
+	 *
+	 * Therefore the driver uses an emulated PCI Bridge, which handles
+	 * accesses to the configuration space via internal Aardvark registers
+	 * or an emulated configuration buffer.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
+	reg &= ~0xffffff00;
+	reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+	advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
+
+	/* Disable Root Bridge I/O space, memory space and bus mastering */
+	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+	reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+
+	/* Set Advanced Error Capabilities and Control PF0 register */
+	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
+		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
+		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
+		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
+	advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
+
+	/* Set PCIe Device Control register */
+	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
+	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+	reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
+	reg &= ~PCI_EXP_DEVCTL_READRQ;
+	reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
+	reg |= PCI_EXP_DEVCTL_READRQ_512B;
+	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+
+	/* Program PCIe Control 2 to disable strict ordering */
+	reg = PCIE_CORE_CTRL2_RESERVED |
+		PCIE_CORE_CTRL2_TD_ENABLE;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+	/* Set lane X1 */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+	reg &= ~LANE_CNT_MSK;
+	reg |= LANE_COUNT_1;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+	/* Enable MSI */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+	/* Clear all interrupts */
+	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
+	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+	/* Disable All ISR0/1 Sources */
+	reg = PCIE_ISR0_ALL_MASK;
+	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
+	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
+
+	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+
+	/* Unmask all MSIs */
+	advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+
+	/* Enable summary interrupt for GIC SPI source */
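+	/*
+	 * Bits set in HOST_CTRL_INT_MASK_REG mask the corresponding source,
+	 * so this leaves only PCIE_IRQ_CORE_INT unmasked.
+	 */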
+	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
+	advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+
+	/*
+	 * Enable AXI address window location generation:
+	 * When enabled, the default outbound window configuration
+	 * (Default User Field: 0xD0074CFC) is used for transparent
+	 * address translation of outbound transactions. Thus, no
+	 * PCIe address windows are required for transparent memory
+	 * access when the default outbound window configuration is
+	 * set for memory access.
+	 */
+	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+	reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
+	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+	/*
+	 * Set memory access in the Default User Field so that no PCIe
+	 * address window configuration is required for transparent
+	 * memory access.
+	 */
+	advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
+
+	/*
+	 * Bypass the address window mapping for PIO:
+	 * Since a PIO access already carries all required information
+	 * over the AXI interface via the PIO registers, no address
+	 * window is required.
+	 */
+	reg = advk_readl(pcie, PIO_CTRL);
+	reg |= PIO_CTRL_ADDR_WIN_DISABLE;
+	advk_writel(pcie, reg, PIO_CTRL);
+
+	/*
+	 * Configure PCIe address windows for non-memory or non-transparent
+	 * access, as by default PCIe uses transparent memory access.
+	 */
+	for (i = 0; i < pcie->wins_count; i++)
+		advk_pcie_set_ob_win(pcie, i,
+				     pcie->wins[i].match, pcie->wins[i].remap,
+				     pcie->wins[i].mask, pcie->wins[i].actions);
+
+	/* Disable remaining PCIe outbound windows */
+	for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
+		advk_pcie_disable_ob_win(pcie, i);
+
+	advk_pcie_train_link(pcie);
+}
+
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+{
+	struct device *dev = &pcie->pdev->dev;
+	u32 reg;
+	unsigned int status;
+	char *strcomp_status, *str_posted;
+	int ret;
+
+	reg = advk_readl(pcie, PIO_STAT);
+	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
+		PIO_COMPLETION_STATUS_SHIFT;
+
+	/*
+	 * According to the HW spec, the PIO status check sequence is as
+	 * follows:
+	 * 1) Even if COMPLETION_STATUS (bits 9:7) indicates success, the
+	 *    Error Status bit (bit 11) still needs to be checked; only when
+	 *    this bit indicates no error is the operation successful.
+	 * 2) The Unsupported Request (1) value of COMPLETION_STATUS (bits
+	 *    9:7) only means a PIO write error; a PIO read is successful with
+	 *    a read value of 0xFFFFFFFF.
+	 * 3) The Completion Retry Status (CRS) value of COMPLETION_STATUS
+	 *    (bits 9:7) only means a PIO write error; a PIO read is
+	 *    successful with a read value of 0xFFFF0001.
+	 * 4) The Completer Abort (CA) value of COMPLETION_STATUS (bits 9:7)
+	 *    means an error for both PIO read and PIO write operations.
+	 * 5) Other errors are indicated as 'unknown'.
+	 */
+	switch (status) {
+	case PIO_COMPLETION_STATUS_OK:
+		if (reg & PIO_ERR_STATUS) {
+			strcomp_status = "COMP_ERR";
+			ret = -EFAULT;
+			break;
+		}
+		/* Get the read result */
+		if (val)
+			*val = advk_readl(pcie, PIO_RD_DATA);
+		/* No error */
+		strcomp_status = NULL;
+		ret = 0;
+		break;
+	case PIO_COMPLETION_STATUS_UR:
+		strcomp_status = "UR";
+		ret = -EOPNOTSUPP;
+		break;
+	case PIO_COMPLETION_STATUS_CRS:
+		if (allow_crs && val) {
+			/* PCIe r4.0, sec 2.3.2, says:
+			 * If CRS Software Visibility is enabled:
+			 * For a Configuration Read Request that includes both
+			 * bytes of the Vendor ID field of a device Function's
+			 * Configuration Space Header, the Root Complex must
+			 * complete the Request to the host by returning a
+			 * read-data value of 0001h for the Vendor ID field and
+			 * all '1's for any additional bytes included in the
+			 * request.
+			 *
+			 * So CRS in this case is not an error status.
+			 */
+			*val = CFG_RD_CRS_VAL;
+			strcomp_status = NULL;
+			ret = 0;
+			break;
+		}
+		/* PCIe r4.0, sec 2.3.2, says:
+		 * If CRS Software Visibility is not enabled, the Root Complex
+		 * must re-issue the Configuration Request as a new Request.
+		 * If CRS Software Visibility is enabled: For a Configuration
+		 * Write Request or for any other Configuration Read Request,
+		 * the Root Complex must re-issue the Configuration Request as
+		 * a new Request.
+		 * A Root Complex implementation may choose to limit the number
+		 * of Configuration Request/CRS Completion Status loops before
+		 * determining that something is wrong with the target of the
+		 * Request and taking appropriate action, e.g., complete the
+		 * Request to the host as a failed transaction.
+		 *
+		 * So return -EAGAIN and caller (pci-aardvark.c driver) will
+		 * re-issue request again up to the PIO_RETRY_CNT retries.
+		 */
+		strcomp_status = "CRS";
+		ret = -EAGAIN;
+		break;
+	case PIO_COMPLETION_STATUS_CA:
+		strcomp_status = "CA";
+		ret = -ECANCELED;
+		break;
+	default:
+		strcomp_status = "Unknown";
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!strcomp_status)
+		return ret;
+
+	if (reg & PIO_NON_POSTED_REQ)
+		str_posted = "Non-posted";
+	else
+		str_posted = "Posted";
+
+	dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+
+	return ret;
+}
+
+static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	int i;
+
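+	/*
+	 * Poll until the PIO transfer completes (PIO_START clears and the
+	 * DONE ISR is set); return the number of polling iterations consumed
+	 * so the caller can count them against the PIO_RETRY_CNT retry
+	 * budget.
+	 */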
+	for (i = 1; i <= PIO_RETRY_CNT; i++) {
+		u32 start, isr;
+
+		start = advk_readl(pcie, PIO_START);
+		isr = advk_readl(pcie, PIO_ISR);
+		if (!start && isr)
+			return i;
+		udelay(PIO_RETRY_DELAY);
+	}
+
+	dev_err(dev, "PIO read/write transfer time out\n");
+	return -ETIMEDOUT;
+}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+				    int reg, u32 *value)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_COMMAND:
+		*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+		return PCI_BRIDGE_EMUL_HANDLED;
+
+	case PCI_INTERRUPT_LINE: {
+		/*
+		 * Of the whole 32-bit register, only one bit is read from HW:
+		 * PCI_BRIDGE_CTL_BUS_RESET. The other bits are retrieved only
+		 * from the emulated config buffer.
+		 */
+		__le32 *cfgspace = (__le32 *)&bridge->conf;
+		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+		if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
+			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+		else
+			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	default:
+		return PCI_BRIDGE_EMUL_NOT_HANDLED;
+	}
+}
+
+static void
+advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+				     int reg, u32 old, u32 new, u32 mask)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_COMMAND:
+		advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
+		break;
+
+	case PCI_INTERRUPT_LINE:
+		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+			u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
+			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+				val |= HOT_RESET_GEN;
+			else
+				val &= ~HOT_RESET_GEN;
+			advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+				    int reg, u32 *value)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_EXP_SLTCTL:
+		*value = PCI_EXP_SLTSTA_PDS << 16;
+		return PCI_BRIDGE_EMUL_HANDLED;
+
+	case PCI_EXP_RTCTL: {
+		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+		*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+		*value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
+		*value |= PCI_EXP_RTCAP_CRSVIS << 16;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_RTSTA: {
+		u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
+		u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
+		*value = msglog >> 16;
+		if (isr0 & PCIE_MSG_PM_PME_MASK)
+			*value |= PCI_EXP_RTSTA_PME;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_LNKCAP: {
+		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+		/*
+		 * The PCI_EXP_LNKCAP_DLLLARC bit is hardwired to 0 in
+		 * aardvark HW. But support for PCI_EXP_LNKSTA_DLLLA is
+		 * emulated via the LTSSM state, so explicitly set the
+		 * PCI_EXP_LNKCAP_DLLLARC flag.
+		 */
+		val |= PCI_EXP_LNKCAP_DLLLARC;
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_LNKCTL: {
+		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+			~(PCI_EXP_LNKSTA_LT << 16);
+		if (advk_pcie_link_training(pcie))
+			val |= (PCI_EXP_LNKSTA_LT << 16);
+		if (advk_pcie_link_active(pcie))
+			val |= (PCI_EXP_LNKSTA_DLLLA << 16);
+		*value = val;
+		return PCI_BRIDGE_EMUL_HANDLED;
+	}
+
+	case PCI_EXP_DEVCAP:
+	case PCI_EXP_DEVCTL:
+		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+		return PCI_BRIDGE_EMUL_HANDLED;
+	default:
+		return PCI_BRIDGE_EMUL_NOT_HANDLED;
+	}
+}
+
+static void
+advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+				     int reg, u32 old, u32 new, u32 mask)
+{
+	struct advk_pcie *pcie = bridge->data;
+
+	switch (reg) {
+	case PCI_EXP_DEVCTL:
+		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+		break;
+
+	case PCI_EXP_LNKCTL:
+		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+		if (new & PCI_EXP_LNKCTL_RL)
+			advk_pcie_wait_for_retrain(pcie);
+		break;
+
+	case PCI_EXP_RTCTL: {
+		/* Only mask/unmask PME interrupt */
+		u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+			~PCIE_MSG_PM_PME_MASK;
+		if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+			val |= PCIE_MSG_PM_PME_MASK;
+		advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+		break;
+	}
+
+	case PCI_EXP_RTSTA:
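+		/*
+		 * W1C: shift PCI_EXP_RTSTA_PME (bit 16) down to the
+		 * corresponding PME bit in the ISR0 register to clear it.
+		 */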
+		new = (new & PCI_EXP_RTSTA_PME) >> 9;
+		advk_writel(pcie, new, PCIE_ISR0_REG);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+	.read_base = advk_pci_bridge_emul_base_conf_read,
+	.write_base = advk_pci_bridge_emul_base_conf_write,
+	.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
+	.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+{
+	struct pci_bridge_emul *bridge = &pcie->bridge;
+
+	bridge->conf.vendor =
+		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+	bridge->conf.device =
+		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
+	bridge->conf.class_revision =
+		cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
+
+	/* Support 32 bits I/O addressing */
+	bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+	bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+
+	/* Support 64 bits memory pref */
+	bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+
+	/* Support interrupt A for MSI feature */
+	bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+
+	/* Aardvark HW provides PCIe Capability structure in version 2 */
+	bridge->pcie_conf.cap = cpu_to_le16(2);
+
+	/* Indicates support for Completion Retry Status */
+	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
+	bridge->has_pcie = true;
+	bridge->data = pcie;
+	bridge->ops = &advk_pci_bridge_emul_ops;
+
+	return pci_bridge_emul_init(bridge, 0);
+}
+
+static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+				  int devfn)
+{
+	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+		return false;
+
+	/*
+	 * If the link goes down after we check for link-up, nothing bad
+	 * happens but the config access times out.
+	 */
+	if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
+		return false;
+
+	return true;
+}
+
+static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+
+	/*
+	 * Trying to start a new PIO transfer when the previous one has not
+	 * completed causes an External Abort on the CPU, which results in a
+	 * kernel panic:
+	 *
+	 *     SError Interrupt on CPU0, code 0xbf000002 -- SError
+	 *     Kernel panic - not syncing: Asynchronous SError Interrupt
+	 *
+	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
+	 * by raw_spin_lock_irqsave() at the pci_lock_config() level to
+	 * prevent concurrent calls. But because a PIO transfer may take about
+	 * 1.5s when the link is down or the card is disconnected,
+	 * advk_pcie_wait_pio() does not always wait for completion.
+	 *
+	 * Some versions of ARM Trusted Firmware handle this External Abort at
+	 * EL3 level and mask it to prevent the kernel panic. Relevant TF-A
+	 * commit:
+	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
+	 */
+	if (advk_readl(pcie, PIO_START)) {
+		dev_err(dev, "Previous PIO read/write transfer is still running\n");
+		return true;
+	}
+
+	return false;
+}
+
+static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+			     int where, int size, u32 *val)
+{
+	struct advk_pcie *pcie = bus->sysdata;
+	int retry_count;
+	bool allow_crs;
+	u32 reg;
+	int ret;
+
+	if (!advk_pcie_valid_device(pcie, bus, devfn)) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	if (bus->number == pcie->root_bus_nr)
+		return pci_bridge_emul_conf_read(&pcie->bridge, where,
+						 size, val);
+
+	/*
+	 * Completion Retry Status may be returned only when reading all
+	 * 4 bytes of the PCI_VENDOR_ID and PCI_DEVICE_ID registers at once
+	 * and the CRSSVE flag on the Root Bridge is enabled.
+	 */
+	allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+		    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
+		     PCI_EXP_RTCTL_CRSSVE);
+
+	if (advk_pcie_pio_is_running(pcie))
+		goto try_crs;
+
+	/* Program the control register */
+	reg = advk_readl(pcie, PIO_CTRL);
+	reg &= ~PIO_CTRL_TYPE_MASK;
+	if (bus->primary == pcie->root_bus_nr)
+		reg |= PCIE_CONFIG_RD_TYPE0;
+	else
+		reg |= PCIE_CONFIG_RD_TYPE1;
+	advk_writel(pcie, reg, PIO_CTRL);
+
+	/* Program the address registers */
+	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+	advk_writel(pcie, reg, PIO_ADDR_LS);
+	advk_writel(pcie, 0, PIO_ADDR_MS);
+
+	/* Program the data strobe */
+	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+
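+	/*
+	 * Retry the transfer while the completer keeps returning CRS, up to
+	 * PIO_RETRY_CNT iterations in total (polling iterations inside
+	 * advk_pcie_wait_pio() count towards the same budget).
+	 */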
+	retry_count = 0;
+	do {
+		/* Clear PIO DONE ISR and start the transfer */
+		advk_writel(pcie, 1, PIO_ISR);
+		advk_writel(pcie, 1, PIO_START);
+
+		ret = advk_pcie_wait_pio(pcie);
+		if (ret < 0)
+			goto try_crs;
+
+		retry_count += ret;
+
+		/* Check PIO status and get the read result */
+		ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+	if (ret < 0)
+		goto fail;
+
+	if (size == 1)
+		*val = (*val >> (8 * (where & 3))) & 0xff;
+	else if (size == 2)
+		*val = (*val >> (8 * (where & 3))) & 0xffff;
+
+	return PCIBIOS_SUCCESSFUL;
+
+try_crs:
+	/*
+	 * If possible, return Completion Retry Status so that the caller
+	 * tries to issue the request again instead of failing.
+	 */
+	if (allow_crs) {
+		*val = CFG_RD_CRS_VAL;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+fail:
+	*val = 0xffffffff;
+	return PCIBIOS_SET_FAILED;
+}
+
+static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+				int where, int size, u32 val)
+{
+	struct advk_pcie *pcie = bus->sysdata;
+	u32 reg;
+	u32 data_strobe = 0x0;
+	int retry_count;
+	int offset;
+	int ret;
+
+	if (!advk_pcie_valid_device(pcie, bus, devfn))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (bus->number == pcie->root_bus_nr)
+		return pci_bridge_emul_conf_write(&pcie->bridge, where,
+						  size, val);
+
+	if (where % size)
+		return PCIBIOS_SET_FAILED;
+
+	if (advk_pcie_pio_is_running(pcie))
+		return PCIBIOS_SET_FAILED;
+
+	/* Program the control register */
+	reg = advk_readl(pcie, PIO_CTRL);
+	reg &= ~PIO_CTRL_TYPE_MASK;
+	if (bus->primary == pcie->root_bus_nr)
+		reg |= PCIE_CONFIG_WR_TYPE0;
+	else
+		reg |= PCIE_CONFIG_WR_TYPE1;
+	advk_writel(pcie, reg, PIO_CTRL);
+
+	/* Program the address registers */
+	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+	advk_writel(pcie, reg, PIO_ADDR_LS);
+	advk_writel(pcie, 0, PIO_ADDR_MS);
+
+	/* Calculate the write strobe */
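+	/* e.g. a 16-bit write at (where & 3) == 2 yields data_strobe = 0xc */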
+	offset      = where & 0x3;
+	reg         = val << (8 * offset);
+	data_strobe = GENMASK(size - 1, 0) << offset;
+
+	/* Program the data register */
+	advk_writel(pcie, reg, PIO_WR_DATA);
+
+	/* Program the data strobe */
+	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+
+	retry_count = 0;
+	do {
+		/* Clear PIO DONE ISR and start the transfer */
+		advk_writel(pcie, 1, PIO_ISR);
+		advk_writel(pcie, 1, PIO_START);
+
+		ret = advk_pcie_wait_pio(pcie);
+		if (ret < 0)
+			return PCIBIOS_SET_FAILED;
+
+		retry_count += ret;
+
+		ret = advk_pcie_check_pio_status(pcie, false, NULL);
+	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+	return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops advk_pcie_ops = {
+	.read = advk_pcie_rd_conf,
+	.write = advk_pcie_wr_conf,
+};
+
+static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
+					 struct msi_msg *msg)
+{
+	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
+	phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
+
+	msg->address_lo = lower_32_bits(msi_msg);
+	msg->address_hi = upper_32_bits(msi_msg);
+	msg->data = data->hwirq;
+}
+
+static int advk_msi_set_affinity(struct irq_data *irq_data,
+				 const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
+				     unsigned int virq,
+				     unsigned int nr_irqs, void *args)
+{
+	struct advk_pcie *pcie = domain->host_data;
+	int hwirq, i;
+
+	mutex_lock(&pcie->msi_used_lock);
+	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
+					order_base_2(nr_irqs));
+	mutex_unlock(&pcie->msi_used_lock);
+	if (hwirq < 0)
+		return -ENOSPC;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &pcie->msi_bottom_irq_chip,
+				    domain->host_data, handle_simple_irq,
+				    NULL, NULL);
+
+	return 0;
+}
+
+static void advk_msi_irq_domain_free(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct advk_pcie *pcie = domain->host_data;
+
+	mutex_lock(&pcie->msi_used_lock);
+	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
+	mutex_unlock(&pcie->msi_used_lock);
+}
+
+static const struct irq_domain_ops advk_msi_domain_ops = {
+	.alloc = advk_msi_irq_domain_alloc,
+	.free = advk_msi_irq_domain_free,
+};
+
+static void advk_pcie_irq_mask(struct irq_data *d)
+{
+	struct advk_pcie *pcie = d->domain->host_data;
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 mask;
+
+	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
+	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static void advk_pcie_irq_unmask(struct irq_data *d)
+{
+	struct advk_pcie *pcie = d->domain->host_data;
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long flags;
+	u32 mask;
+
+	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
+	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
+}
+
+static int advk_pcie_irq_map(struct irq_domain *h,
+			     unsigned int virq, irq_hw_number_t hwirq)
+{
+	struct advk_pcie *pcie = h->host_data;
+
+	advk_pcie_irq_mask(irq_get_irq_data(virq));
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_chip_and_handler(virq, &pcie->irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, pcie);
+
+	return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
+	.map = advk_pcie_irq_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct irq_chip *bottom_ic, *msi_ic;
+	struct msi_domain_info *msi_di;
+	phys_addr_t msi_msg_phys;
+
+	mutex_init(&pcie->msi_used_lock);
+
+	bottom_ic = &pcie->msi_bottom_irq_chip;
+
+	bottom_ic->name = "MSI";
+	bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
+	bottom_ic->irq_set_affinity = advk_msi_set_affinity;
+
+	msi_ic = &pcie->msi_irq_chip;
+	msi_ic->name = "advk-MSI";
+
+	msi_di = &pcie->msi_domain_info;
+	msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		MSI_FLAG_MULTI_PCI_MSI;
+	msi_di->chip = msi_ic;
+
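+	/*
+	 * Program the physical address of the driver-private msi_msg field as
+	 * the MSI doorbell address; MSI messages composed in
+	 * advk_msi_irq_compose_msi_msg() target the same address, so inbound
+	 * MSI writes are presumably captured by the controller rather than
+	 * hitting memory.
+	 */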
+	msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+	advk_writel(pcie, lower_32_bits(msi_msg_phys),
+		    PCIE_MSI_ADDR_LOW_REG);
+	advk_writel(pcie, upper_32_bits(msi_msg_phys),
+		    PCIE_MSI_ADDR_HIGH_REG);
+
+	pcie->msi_inner_domain =
+		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
+				      &advk_msi_domain_ops, pcie);
+	if (!pcie->msi_inner_domain)
+		return -ENOMEM;
+
+	pcie->msi_domain =
+		pci_msi_create_irq_domain(of_node_to_fwnode(node),
+					  msi_di, pcie->msi_inner_domain);
+	if (!pcie->msi_domain) {
+		irq_domain_remove(pcie->msi_inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
+{
+	irq_domain_remove(pcie->msi_domain);
+	irq_domain_remove(pcie->msi_inner_domain);
+}
+
+static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node;
+	struct irq_chip *irq_chip;
+	int ret = 0;
+
+	raw_spin_lock_init(&pcie->irq_lock);
+
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	irq_chip = &pcie->irq_chip;
+
+	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
+					dev_name(dev));
+	if (!irq_chip->name) {
+		ret = -ENOMEM;
+		goto out_put_node;
+	}
+
+	irq_chip->irq_mask = advk_pcie_irq_mask;
+	irq_chip->irq_mask_ack = advk_pcie_irq_mask;
+	irq_chip->irq_unmask = advk_pcie_irq_unmask;
+
+	pcie->irq_domain =
+		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+				      &advk_pcie_irq_domain_ops, pcie);
+	if (!pcie->irq_domain) {
+		dev_err(dev, "Failed to get a INTx IRQ domain\n");
+		ret = -ENOMEM;
+		goto out_put_node;
+	}
+
+out_put_node:
+	of_node_put(pcie_intc_node);
+	return ret;
+}
+
+static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
+{
+	irq_domain_remove(pcie->irq_domain);
+}
+
+static void advk_pcie_handle_msi(struct advk_pcie *pcie)
+{
+	u32 msi_val, msi_mask, msi_status, msi_idx;
+	int virq;
+
+	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
+	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
+
+	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
+		if (!(BIT(msi_idx) & msi_status))
+			continue;
+
+		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
+		virq = irq_find_mapping(pcie->msi_inner_domain, msi_idx);
+		generic_handle_irq(virq);
+	}
+
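+	/* Ack the summary MSI bit in ISR0 now that individual MSIs are handled */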
+	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
+		    PCIE_ISR0_REG);
+}
+
+static void advk_pcie_handle_int(struct advk_pcie *pcie)
+{
+	u32 isr0_val, isr0_mask, isr0_status;
+	u32 isr1_val, isr1_mask, isr1_status;
+	int i, virq;
+
+	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
+	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
+
+	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
+	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
+
+	/* Process MSI interrupts */
+	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
+		advk_pcie_handle_msi(pcie);
+
+	/* Process legacy interrupts */
+	for (i = 0; i < PCI_NUM_INTX; i++) {
+		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
+			continue;
+
+		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
+			    PCIE_ISR1_REG);
+
+		virq = irq_find_mapping(pcie->irq_domain, i);
+		generic_handle_irq(virq);
+	}
+}
+
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+{
+	struct advk_pcie *pcie = arg;
+	u32 status;
+
+	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+	if (!(status & PCIE_IRQ_CORE_INT))
+		return IRQ_NONE;
+
+	advk_pcie_handle_int(pcie);
+
+	/* Clear interrupt */
+	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+	return IRQ_HANDLED;
+}
+
+static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
+{
+	int err, res_valid = 0;
+	struct device *dev = &pcie->pdev->dev;
+	struct resource_entry *win, *tmp;
+	resource_size_t iobase;
+
+	INIT_LIST_HEAD(&pcie->resources);
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &pcie->resources, &iobase);
+	if (err)
+		return err;
+
+	err = devm_request_pci_bus_resources(dev, &pcie->resources);
+	if (err)
+		goto out_release_res;
+
+	resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+		struct resource *res = win->res;
+
+		switch (resource_type(res)) {
+		case IORESOURCE_IO:
+			err = devm_pci_remap_iospace(dev, res, iobase);
+			if (err) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 err, res);
+				resource_list_destroy_entry(win);
+			}
+			break;
+		case IORESOURCE_MEM:
+			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+			break;
+		case IORESOURCE_BUS:
+			pcie->root_bus_nr = res->start;
+			break;
+		}
+	}
+
+	if (!res_valid) {
+		dev_err(dev, "non-prefetchable memory resource required\n");
+		err = -EINVAL;
+		goto out_release_res;
+	}
+
+	return 0;
+
+out_release_res:
+	pci_free_resource_list(&pcie->resources);
+	return err;
+}
+
+static int advk_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct advk_pcie *pcie;
+	struct resource *res;
+	struct pci_host_bridge *bridge;
+	struct resource_entry *entry;
+	int ret, irq;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+	pcie->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pcie->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+			       pcie);
+	if (ret) {
+		dev_err(dev, "Failed to register interrupt\n");
+		return ret;
+	}
+
+	ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to parse resources\n");
+		return ret;
+	}
+
+	resource_list_for_each_entry(entry, &pcie->resources) {
+		resource_size_t start = entry->res->start;
+		resource_size_t size = resource_size(entry->res);
+		unsigned long type = resource_type(entry->res);
+		u64 win_size;
+
+		/*
+		 * Aardvark hardware also allows configuring PCIe windows
+		 * for config type 0 and type 1 mapping, but the driver uses
+		 * only PIO for issuing configuration transfers, which does
+		 * not use PCIe window configuration.
+		 */
+		if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
+		    type != IORESOURCE_IO)
+			continue;
+
+		/*
+		 * Skip transparent memory resources. Default outbound access
+		 * configuration is set to transparent memory access so it
+		 * does not need window configuration.
+		 */
+		if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
+		    entry->offset == 0)
+			continue;
+
+		/*
+		 * The n-th PCIe window is configured by the tuple (match,
+		 * remap, mask): an access to address A uses this window if A
+		 * matches 'match' under the given mask. So every PCIe window
+		 * size must be a power of two, and every start address must
+		 * be aligned to the window size. The minimal size is 64 KiB
+		 * because the lower 16 bits of the mask must be zero. The
+		 * remapped address may only have bits from the mask set.
+		 */
+		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
+			/* Calculate the largest aligned window size */
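+			/* i.e. min(largest power of two <= size, alignment of start) */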
+			win_size = (1ULL << (fls64(size)-1)) |
+				   (start ? (1ULL << __ffs64(start)) : 0);
+			win_size = 1ULL << __ffs64(win_size);
+			if (win_size < 0x10000)
+				break;
+
+			dev_dbg(dev,
+				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
+				pcie->wins_count, (unsigned long long)start,
+				(unsigned long long)start + win_size, type);
+
+			if (type == IORESOURCE_IO) {
+				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
+				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
+			} else {
+				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
+				pcie->wins[pcie->wins_count].match = start;
+			}
+			pcie->wins[pcie->wins_count].remap = start - entry->offset;
+			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
+
+			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
+				break;
+
+			start += win_size;
+			size -= win_size;
+			pcie->wins_count++;
+		}
+
+		if (size > 0) {
+			dev_err(&pcie->pdev->dev,
+				"Invalid PCIe region [0x%llx-0x%llx]\n",
+				(unsigned long long)entry->res->start,
+				(unsigned long long)entry->res->end + 1);
+			return -EINVAL;
+		}
+	}
+
+	pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
+						       "reset-gpios", 0,
+						       GPIOD_OUT_LOW,
+						       "pcie1-reset");
+	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
+	if (ret) {
+		if (ret == -ENOENT) {
+			pcie->reset_gpio = NULL;
+		} else {
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get reset-gpio: %i\n",
+					ret);
+			return ret;
+		}
+	}
+
+	ret = of_pci_get_max_link_speed(dev->of_node);
+	if (ret <= 0 || ret > 3)
+		pcie->link_gen = 3;
+	else
+		pcie->link_gen = ret;
+
+	advk_pcie_setup_hw(pcie);
+
+	ret = advk_sw_pci_bridge_init(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to register emulated root PCI bridge\n");
+		return ret;
+	}
+
+	ret = advk_pcie_init_irq_domain(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to initialize irq\n");
+		return ret;
+	}
+
+	ret = advk_pcie_init_msi_irq_domain(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to initialize irq\n");
+		advk_pcie_remove_irq_domain(pcie);
+		return ret;
+	}
+
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = 0;
+	bridge->ops = &advk_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_host_probe(bridge);
+	if (ret < 0) {
+		advk_pcie_remove_msi_irq_domain(pcie);
+		advk_pcie_remove_irq_domain(pcie);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id advk_pcie_of_match_table[] = {
+	{ .compatible = "marvell,armada-3700-pcie", },
+	{},
+};
+
+static struct platform_driver advk_pcie_driver = {
+	.driver = {
+		.name = "advk-pcie",
+		.of_match_table = advk_pcie_of_match_table,
+		/* Driver unloading/unbinding currently not supported */
+		.suppress_bind_attrs = true,
+	},
+	.probe = advk_pcie_probe,
+};
+builtin_platform_driver(advk_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/pci-ftpci100.c b/marvell/linux/drivers/pci/controller/pci-ftpci100.c
new file mode 100644
index 0000000..88983fd
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-ftpci100.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Faraday Technology FTPCI100 PCI Controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the out-of-tree OpenWRT patch for Cortina Gemini:
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Based on SL2312 PCI controller code
+ * Storlink (C) 2003
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+
+#include "../pci.h"
+
+/*
+ * Special configuration registers directly in the first few words
+ * in I/O space.
+ */
+#define PCI_IOSIZE	0x00
+#define PCI_PROT	0x04 /* AHB protection */
+#define PCI_CTRL	0x08 /* PCI control signal */
+#define PCI_SOFTRST	0x10 /* Soft reset counter and response error enable */
+#define PCI_CONFIG	0x28 /* PCI configuration command register */
+#define PCI_DATA	0x2C
+
+#define FARADAY_PCI_STATUS_CMD		0x04 /* Status and command */
+#define FARADAY_PCI_PMC			0x40 /* Power management control */
+#define FARADAY_PCI_PMCSR		0x44 /* Power management status */
+#define FARADAY_PCI_CTRL1		0x48 /* Control register 1 */
+#define FARADAY_PCI_CTRL2		0x4C /* Control register 2 */
+#define FARADAY_PCI_MEM1_BASE_SIZE	0x50 /* Memory base and size #1 */
+#define FARADAY_PCI_MEM2_BASE_SIZE	0x54 /* Memory base and size #2 */
+#define FARADAY_PCI_MEM3_BASE_SIZE	0x58 /* Memory base and size #3 */
+
+#define PCI_STATUS_66MHZ_CAPABLE	BIT(21)
+
+/* Bits 31..28 give INTD..INTA status */
+#define PCI_CTRL2_INTSTS_SHIFT		28
+#define PCI_CTRL2_INTMASK_CMDERR	BIT(27)
+#define PCI_CTRL2_INTMASK_PARERR	BIT(26)
+/* Bits 25..22 mask INTD..INTA */
+#define PCI_CTRL2_INTMASK_SHIFT		22
+#define PCI_CTRL2_INTMASK_MABRT_RX	BIT(21)
+#define PCI_CTRL2_INTMASK_TABRT_RX	BIT(20)
+#define PCI_CTRL2_INTMASK_TABRT_TX	BIT(19)
+#define PCI_CTRL2_INTMASK_RETRY4	BIT(18)
+#define PCI_CTRL2_INTMASK_SERR_RX	BIT(17)
+#define PCI_CTRL2_INTMASK_PERR_RX	BIT(16)
+/* Bit 15 reserved */
+#define PCI_CTRL2_MSTPRI_REQ6		BIT(14)
+#define PCI_CTRL2_MSTPRI_REQ5		BIT(13)
+#define PCI_CTRL2_MSTPRI_REQ4		BIT(12)
+#define PCI_CTRL2_MSTPRI_REQ3		BIT(11)
+#define PCI_CTRL2_MSTPRI_REQ2		BIT(10)
+#define PCI_CTRL2_MSTPRI_REQ1		BIT(9)
+#define PCI_CTRL2_MSTPRI_REQ0		BIT(8)
+/* Bits 7..4 reserved */
+/* Bits 3..0 TRDYW */
+
+/*
+ * Memory configs:
+ * Bits 31..20 define the PCI side memory base
+ * Bits 19..16 (4 bits) define the size as listed below
+ */
+#define FARADAY_PCI_MEMBASE_MASK	0xfff00000
+#define FARADAY_PCI_MEMSIZE_1MB		0x0
+#define FARADAY_PCI_MEMSIZE_2MB		0x1
+#define FARADAY_PCI_MEMSIZE_4MB		0x2
+#define FARADAY_PCI_MEMSIZE_8MB		0x3
+#define FARADAY_PCI_MEMSIZE_16MB	0x4
+#define FARADAY_PCI_MEMSIZE_32MB	0x5
+#define FARADAY_PCI_MEMSIZE_64MB	0x6
+#define FARADAY_PCI_MEMSIZE_128MB	0x7
+#define FARADAY_PCI_MEMSIZE_256MB	0x8
+#define FARADAY_PCI_MEMSIZE_512MB	0x9
+#define FARADAY_PCI_MEMSIZE_1GB		0xa
+#define FARADAY_PCI_MEMSIZE_2GB		0xb
+#define FARADAY_PCI_MEMSIZE_SHIFT	16
+
+/*
+ * The DMA base is set to 0x0 for all memory segments; it reflects the
+ * fact that the memory of the host system starts at 0x0.
+ */
+#define FARADAY_PCI_DMA_MEM1_BASE	0x00000000
+#define FARADAY_PCI_DMA_MEM2_BASE	0x00000000
+#define FARADAY_PCI_DMA_MEM3_BASE	0x00000000
+
+/* Defines for PCI configuration command register */
+#define PCI_CONF_ENABLE		BIT(31)
+#define PCI_CONF_WHERE(r)	((r) & 0xFC)
+#define PCI_CONF_BUS(b)		(((b) & 0xFF) << 16)
+#define PCI_CONF_DEVICE(d)	(((d) & 0x1F) << 11)
+#define PCI_CONF_FUNCTION(f)	(((f) & 0x07) << 8)
+
+/**
+ * struct faraday_pci_variant - encodes IP block differences
+ * @cascaded_irq: this host has cascaded IRQs from an interrupt controller
+ *	embedded in the host bridge.
+ */
+struct faraday_pci_variant {
+	bool cascaded_irq;
+};
+
+struct faraday_pci {
+	struct device *dev;
+	void __iomem *base;
+	struct irq_domain *irqdomain;
+	struct pci_bus *bus;
+	struct clk *bus_clk;
+};
+
+static int faraday_res_to_memcfg(resource_size_t mem_base,
+				 resource_size_t mem_size, u32 *val)
+{
+	u32 outval;
+
+	switch (mem_size) {
+	case SZ_1M:
+		outval = FARADAY_PCI_MEMSIZE_1MB;
+		break;
+	case SZ_2M:
+		outval = FARADAY_PCI_MEMSIZE_2MB;
+		break;
+	case SZ_4M:
+		outval = FARADAY_PCI_MEMSIZE_4MB;
+		break;
+	case SZ_8M:
+		outval = FARADAY_PCI_MEMSIZE_8MB;
+		break;
+	case SZ_16M:
+		outval = FARADAY_PCI_MEMSIZE_16MB;
+		break;
+	case SZ_32M:
+		outval = FARADAY_PCI_MEMSIZE_32MB;
+		break;
+	case SZ_64M:
+		outval = FARADAY_PCI_MEMSIZE_64MB;
+		break;
+	case SZ_128M:
+		outval = FARADAY_PCI_MEMSIZE_128MB;
+		break;
+	case SZ_256M:
+		outval = FARADAY_PCI_MEMSIZE_256MB;
+		break;
+	case SZ_512M:
+		outval = FARADAY_PCI_MEMSIZE_512MB;
+		break;
+	case SZ_1G:
+		outval = FARADAY_PCI_MEMSIZE_1GB;
+		break;
+	case SZ_2G:
+		outval = FARADAY_PCI_MEMSIZE_2GB;
+		break;
+	default:
+		return -EINVAL;
+	}
+	outval <<= FARADAY_PCI_MEMSIZE_SHIFT;
+
+	/* This is probably not good: the base is about to be truncated */
+	if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK))
+		pr_warn("truncated PCI memory base\n");
+	/* Translate to bridge side address space */
+	outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK);
+	pr_debug("Translated pci base @%pap, size %pap to config %08x\n",
+		 &mem_base, &mem_size, outval);
+
+	*val = outval;
+	return 0;
+}
+
+static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
+				       unsigned int fn, int config, int size,
+				       u32 *value)
+{
+	writel(PCI_CONF_BUS(bus_number) |
+			PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+			PCI_CONF_WHERE(config) |
+			PCI_CONF_ENABLE,
+			p->base + PCI_CONFIG);
+
+	*value = readl(p->base + PCI_DATA);
+
+	if (size == 1)
+		*value = (*value >> (8 * (config & 3))) & 0xFF;
+	else if (size == 2)
+		*value = (*value >> (8 * (config & 3))) & 0xFFFF;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+				   int config, int size, u32 *value)
+{
+	struct faraday_pci *p = bus->sysdata;
+	int ret;
+
+	/* Perform the read first so the debug print shows the value read */
+	ret = faraday_raw_pci_read_config(p, bus->number, fn, config, size,
+					  value);
+
+	dev_dbg(&bus->dev,
+		"[read]  slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+
+	return ret;
+}
+
+static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
+					 unsigned int fn, int config, int size,
+					 u32 value)
+{
+	int ret = PCIBIOS_SUCCESSFUL;
+
+	writel(PCI_CONF_BUS(bus_number) |
+			PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+			PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+			PCI_CONF_WHERE(config) |
+			PCI_CONF_ENABLE,
+			p->base + PCI_CONFIG);
+
+	switch (size) {
+	case 4:
+		writel(value, p->base + PCI_DATA);
+		break;
+	case 2:
+		writew(value, p->base + PCI_DATA + (config & 3));
+		break;
+	case 1:
+		writeb(value, p->base + PCI_DATA + (config & 3));
+		break;
+	default:
+		ret = PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return ret;
+}
+
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+				    int config, int size, u32 value)
+{
+	struct faraday_pci *p = bus->sysdata;
+
+	dev_dbg(&bus->dev,
+		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+	return faraday_raw_pci_write_config(p, bus->number, fn, config, size,
+					    value);
+}
+
+static struct pci_ops faraday_pci_ops = {
+	.read	= faraday_pci_read_config,
+	.write	= faraday_pci_write_config,
+};
+
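+/*
+ * The top nibble of FARADAY_PCI_CTRL2 holds the W1C INTD..INTA status bits,
+ * so the mask/unmask helpers below always clear that nibble in the read-back
+ * value to avoid inadvertently acking a pending interrupt on write-back.
+ */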
+static void faraday_pci_ack_irq(struct irq_data *d)
+{
+	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+	unsigned int reg;
+
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_mask_irq(struct irq_data *d)
+{
+	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+	unsigned int reg;
+
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
+		 | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_unmask_irq(struct irq_data *d)
+{
+	struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+	unsigned int reg;
+
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+	reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_irq_handler(struct irq_desc *desc)
+{
+	struct faraday_pci *p = irq_desc_get_handler_data(desc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned int irq_stat, reg, i;
+
+	faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
+	irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
+
+	chained_irq_enter(irqchip, desc);
+
+	for (i = 0; i < 4; i++) {
+		if ((irq_stat & BIT(i)) == 0)
+			continue;
+		generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+static struct irq_chip faraday_pci_irq_chip = {
+	.name = "PCI",
+	.irq_ack = faraday_pci_ack_irq,
+	.irq_mask = faraday_pci_mask_irq,
+	.irq_unmask = faraday_pci_unmask_irq,
+};
+
+static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq,
+			       irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops faraday_pci_irqdomain_ops = {
+	.map = faraday_pci_irq_map,
+};
+
+static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
+{
+	struct device_node *intc = of_get_next_child(p->dev->of_node, NULL);
+	int irq;
+	int i;
+
+	if (!intc) {
+		dev_err(p->dev, "missing child interrupt-controller node\n");
+		return -EINVAL;
+	}
+
+	/* All PCI IRQs cascade off this one */
+	irq = of_irq_get(intc, 0);
+	if (irq <= 0) {
+		dev_err(p->dev, "failed to get parent IRQ\n");
+		of_node_put(intc);
+		return irq ?: -EINVAL;
+	}
+
+	p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+					     &faraday_pci_irqdomain_ops, p);
+	of_node_put(intc);
+	if (!p->irqdomain) {
+		dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
+		return -EINVAL;
+	}
+
+	irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p);
+
+	for (i = 0; i < 4; i++)
+		irq_create_mapping(p->irqdomain, i);
+
+	return 0;
+}
+
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
+					    struct device_node *np)
+{
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	struct device *dev = p->dev;
+	u32 confreg[3] = {
+		FARADAY_PCI_MEM1_BASE_SIZE,
+		FARADAY_PCI_MEM2_BASE_SIZE,
+		FARADAY_PCI_MEM3_BASE_SIZE,
+	};
+	int i = 0;
+	u32 val;
+
+	if (of_pci_dma_range_parser_init(&parser, np)) {
+		dev_err(dev, "missing dma-ranges property\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the dma-ranges from the device tree
+	 */
+	for_each_of_pci_range(&parser, &range) {
+		u64 end = range.pci_addr + range.size - 1;
+		int ret;
+
+		ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+		if (ret) {
+			dev_err(dev,
+				"DMA range %d: illegal MEM resource size\n", i);
+			return -EINVAL;
+		}
+
+		dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
+			 i + 1, range.pci_addr, end, val);
+		if (i <= 2) {
+			faraday_raw_pci_write_config(p, 0, 0, confreg[i],
+						     4, val);
+		} else {
+			dev_err(dev, "ignore extraneous dma-range %d\n", i);
+			break;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
+static int faraday_pci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct faraday_pci_variant *variant =
+		of_device_get_match_data(dev);
+	struct resource *regs;
+	resource_size_t io_base;
+	struct resource_entry *win;
+	struct faraday_pci *p;
+	struct resource *mem;
+	struct resource *io;
+	struct pci_host_bridge *host;
+	struct clk *clk;
+	unsigned char max_bus_speed = PCI_SPEED_33MHz;
+	unsigned char cur_bus_speed = PCI_SPEED_33MHz;
+	int ret;
+	u32 val;
+	LIST_HEAD(res);
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
+	if (!host)
+		return -ENOMEM;
+
+	host->dev.parent = dev;
+	host->ops = &faraday_pci_ops;
+	host->busnr = 0;
+	host->msi = NULL;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	p = pci_host_bridge_priv(host);
+	host->sysdata = p;
+	p->dev = dev;
+
+	/* Retrieve and enable optional clocks */
+	clk = devm_clk_get_enabled(dev, "PCLK");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+	p->bus_clk = devm_clk_get_enabled(dev, "PCICLK");
+	if (IS_ERR(p->bus_clk))
+		return PTR_ERR(p->bus_clk);
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	p->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(p->base))
+		return PTR_ERR(p->base);
+
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &res, &io_base);
+	if (ret)
+		return ret;
+
+	ret = devm_request_pci_bus_resources(dev, &res);
+	if (ret)
+		return ret;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry(win, &res) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			io = win->res;
+			io->name = "Gemini PCI I/O";
+			if (!faraday_res_to_memcfg(io->start - win->offset,
+						   resource_size(io), &val)) {
+				/* setup I/O space size */
+				writel(val, p->base + PCI_IOSIZE);
+			} else {
+				dev_err(dev, "illegal IO mem size\n");
+				return -EINVAL;
+			}
+			ret = devm_pci_remap_iospace(dev, io, io_base);
+			if (ret) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 ret, io);
+				continue;
+			}
+			break;
+		case IORESOURCE_MEM:
+			mem = win->res;
+			mem->name = "Gemini PCI MEM";
+			break;
+		case IORESOURCE_BUS:
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Set up the host bridge */
+	val = readl(p->base + PCI_CTRL);
+	val |= PCI_COMMAND_IO;
+	val |= PCI_COMMAND_MEMORY;
+	val |= PCI_COMMAND_MASTER;
+	writel(val, p->base + PCI_CTRL);
+	/* Mask and clear all interrupts */
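+	/*
+	 * 16-bit write to the upper half of CTRL2: 0xF000 acks the W1C status
+	 * nibble and zeroes the interrupt mask bits.
+	 */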
+	faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+	if (variant->cascaded_irq) {
+		ret = faraday_pci_setup_cascaded_irq(p);
+		if (ret) {
+			dev_err(dev, "failed to setup cascaded IRQ\n");
+			return ret;
+		}
+	}
+
+	/* Check the bus clock to see if we can gear up to 66 MHz */
+	if (!IS_ERR(p->bus_clk)) {
+		unsigned long rate;
+		u32 val;
+
+		faraday_raw_pci_read_config(p, 0, 0,
+					    FARADAY_PCI_STATUS_CMD, 4, &val);
+		rate = clk_get_rate(p->bus_clk);
+
+		if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) {
+			dev_info(dev, "33MHz bus is 66MHz capable\n");
+			max_bus_speed = PCI_SPEED_66MHz;
+			ret = clk_set_rate(p->bus_clk, 66000000);
+			if (ret)
+				dev_err(dev, "failed to set bus clock\n");
+		} else {
+			dev_info(dev, "33MHz only bus\n");
+			max_bus_speed = PCI_SPEED_33MHz;
+		}
+
+		/* Bumping the clock may fail, so read back the rate */
+		rate = clk_get_rate(p->bus_clk);
+		if (rate == 33000000)
+			cur_bus_speed = PCI_SPEED_33MHz;
+		if (rate == 66000000)
+			cur_bus_speed = PCI_SPEED_66MHz;
+	}
+
+	ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+	if (ret)
+		return ret;
+
+	list_splice_init(&res, &host->windows);
+	ret = pci_scan_root_bus_bridge(host);
+	if (ret) {
+		dev_err(dev, "failed to scan host: %d\n", ret);
+		return ret;
+	}
+	p->bus = host->bus;
+	p->bus->max_bus_speed = max_bus_speed;
+	p->bus->cur_bus_speed = cur_bus_speed;
+
+	pci_bus_assign_resources(p->bus);
+	pci_bus_add_devices(p->bus);
+	pci_free_resource_list(&res);
+
+	return 0;
+}
+
+/*
+ * We encode bridge variants here; we have at least two, so it doesn't
+ * hurt to have infrastructure to encompass future variants as well.
+ */
+static const struct faraday_pci_variant faraday_regular = {
+	.cascaded_irq = true,
+};
+
+static const struct faraday_pci_variant faraday_dual = {
+	.cascaded_irq = false,
+};
+
+static const struct of_device_id faraday_pci_of_match[] = {
+	{
+		.compatible = "faraday,ftpci100",
+		.data = &faraday_regular,
+	},
+	{
+		.compatible = "faraday,ftpci100-dual",
+		.data = &faraday_dual,
+	},
+	{},
+};
+
+static struct platform_driver faraday_pci_driver = {
+	.driver = {
+		.name = "ftpci100",
+		.of_match_table = of_match_ptr(faraday_pci_of_match),
+		.suppress_bind_attrs = true,
+	},
+	.probe  = faraday_pci_probe,
+};
+builtin_platform_driver(faraday_pci_driver);
diff --git a/marvell/linux/drivers/pci/controller/pci-host-common.c b/marvell/linux/drivers/pci/controller/pci-host-common.c
new file mode 100644
index 0000000..c8cb9c5
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-host-common.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic PCI host driver common code
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+static void gen_pci_unmap_cfg(void *ptr)
+{
+	pci_ecam_free((struct pci_config_window *)ptr);
+}
+
+static struct pci_config_window *gen_pci_init(struct device *dev,
+		struct list_head *resources, struct pci_ecam_ops *ops)
+{
+	int err;
+	struct resource cfgres;
+	struct resource *bus_range = NULL;
+	struct pci_config_window *cfg;
+
+	/* Parse our PCI ranges and request their resources */
+	err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+	if (err)
+		return ERR_PTR(err);
+
+	err = of_address_to_resource(dev->of_node, 0, &cfgres);
+	if (err) {
+		dev_err(dev, "missing \"reg\" property\n");
+		goto err_out;
+	}
+
+	cfg = pci_ecam_create(dev, &cfgres, bus_range, ops);
+	if (IS_ERR(cfg)) {
+		err = PTR_ERR(cfg);
+		goto err_out;
+	}
+
+	err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg);
+	if (err)
+		goto err_out;
+	return cfg;
+
+err_out:
+	pci_free_resource_list(resources);
+	return ERR_PTR(err);
+}
+
+int pci_host_common_probe(struct platform_device *pdev,
+			  struct pci_ecam_ops *ops)
+{
+	struct device *dev = &pdev->dev;
+	struct pci_host_bridge *bridge;
+	struct pci_config_window *cfg;
+	struct list_head resources;
+	int ret;
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;
+
+	of_pci_check_probe_only();
+
+	/* Parse and map our Configuration Space windows */
+	cfg = gen_pci_init(dev, &resources, ops);
+	if (IS_ERR(cfg))
+		return PTR_ERR(cfg);
+
+	/* Do not reassign resources if probe only */
+	if (!pci_has_flag(PCI_PROBE_ONLY))
+		pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+	list_splice_init(&resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = cfg;
+	bridge->busnr = cfg->busr.start;
+	bridge->ops = &ops->pci_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_host_probe(bridge);
+	if (ret < 0) {
+		pci_free_resource_list(&resources);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, bridge->bus);
+	return 0;
+}
+
+int pci_host_common_remove(struct platform_device *pdev)
+{
+	struct pci_bus *bus = platform_get_drvdata(pdev);
+
+	pci_lock_rescan_remove();
+	pci_stop_root_bus(bus);
+	pci_remove_root_bus(bus);
+	pci_unlock_rescan_remove();
+
+	return 0;
+}
diff --git a/marvell/linux/drivers/pci/controller/pci-host-generic.c b/marvell/linux/drivers/pci/controller/pci-host-generic.c
new file mode 100644
index 0000000..75a2fb9
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-host-generic.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple, generic PCI host controller driver targeting firmware-initialised
+ * systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
+	.bus_shift	= 16,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= pci_generic_config_read,
+		.write		= pci_generic_config_write,
+	}
+};
+
+static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+
+	/*
+	 * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
+	 * type 0 config TLPs sent to devices 1 and up on its downstream port,
+	 * resulting in devices appearing multiple times on bus 0 unless we
+	 * filter out those accesses here.
+	 */
+	if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0)
+		return false;
+
+	return true;
+}
+
+static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
+					 unsigned int devfn, int where)
+{
+	if (!pci_dw_valid_device(bus, devfn))
+		return NULL;
+
+	return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus	= pci_dw_ecam_map_bus,
+		.read		= pci_generic_config_read,
+		.write		= pci_generic_config_write,
+	}
+};
+
+static const struct of_device_id gen_pci_of_match[] = {
+	{ .compatible = "pci-host-cam-generic",
+	  .data = &gen_pci_cfg_cam_bus_ops },
+
+	{ .compatible = "pci-host-ecam-generic",
+	  .data = &pci_generic_ecam_ops },
+
+	{ .compatible = "marvell,armada8k-pcie-ecam",
+	  .data = &pci_dw_ecam_bus_ops },
+
+	{ .compatible = "socionext,synquacer-pcie-ecam",
+	  .data = &pci_dw_ecam_bus_ops },
+
+	{ .compatible = "snps,dw-pcie-ecam",
+	  .data = &pci_dw_ecam_bus_ops },
+
+	{ },
+};
+
+static int gen_pci_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	struct pci_ecam_ops *ops;
+
+	of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node);
+	ops = (struct pci_ecam_ops *)of_id->data;
+
+	return pci_host_common_probe(pdev, ops);
+}
+
+static struct platform_driver gen_pci_driver = {
+	.driver = {
+		.name = "pci-host-generic",
+		.of_match_table = gen_pci_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = gen_pci_probe,
+	.remove = pci_host_common_remove,
+};
+builtin_platform_driver(gen_pci_driver);
diff --git a/marvell/linux/drivers/pci/controller/pci-hyperv-intf.c b/marvell/linux/drivers/pci/controller/pci-hyperv-intf.c
new file mode 100644
index 0000000..cc96be4
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-hyperv-intf.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Microsoft Corporation.
+ *
+ * Author:
+ *   Haiyang Zhang <haiyangz@microsoft.com>
+ *
+ * This small module is a helper driver that allows other drivers to
+ * have a common interface with the Hyper-V PCI frontend driver.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hyperv.h>
+
+struct hyperv_pci_block_ops hvpci_block_ops;
+EXPORT_SYMBOL_GPL(hvpci_block_ops);
+
+int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
+			unsigned int block_id, unsigned int *bytes_returned)
+{
+	if (!hvpci_block_ops.read_block)
+		return -EOPNOTSUPP;
+
+	return hvpci_block_ops.read_block(dev, buf, buf_len, block_id,
+					  bytes_returned);
+}
+EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk);
+
+int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
+			 unsigned int block_id)
+{
+	if (!hvpci_block_ops.write_block)
+		return -EOPNOTSUPP;
+
+	return hvpci_block_ops.write_block(dev, buf, len, block_id);
+}
+EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk);
+
+int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
+				void (*block_invalidate)(void *context,
+							 u64 block_mask))
+{
+	if (!hvpci_block_ops.reg_blk_invalidate)
+		return -EOPNOTSUPP;
+
+	return hvpci_block_ops.reg_blk_invalidate(dev, context,
+						  block_invalidate);
+}
+EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate);
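+
+/*
+ * Note that the wrappers above only forward to hvpci_block_ops; each
+ * returns -EOPNOTSUPP until the Hyper-V PCI front-end driver is expected
+ * to fill in the function pointers (e.g. hvpci_block_ops.read_block)
+ * when it initializes.
+ */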
+
+static void __exit exit_hv_pci_intf(void)
+{
+}
+
+static int __init init_hv_pci_intf(void)
+{
+	return 0;
+}
+
+module_init(init_hv_pci_intf);
+module_exit(exit_hv_pci_intf);
+
+MODULE_DESCRIPTION("Hyper-V PCI Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pci-hyperv.c b/marvell/linux/drivers/pci/controller/pci-hyperv.c
new file mode 100644
index 0000000..2c502a7
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-hyperv.c
@@ -0,0 +1,3224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Microsoft Corporation.
+ *
+ * Author:
+ *   Jake Oshins <jakeo@microsoft.com>
+ *
+ * This driver acts as a paravirtual front-end for PCI Express root buses.
+ * When a PCI Express function (either an entire device or an SR-IOV
+ * Virtual Function) is being passed through to the VM, this driver exposes
+ * a new bus to the guest VM.  This is modeled as a root PCI bus because
+ * no bridges are being exposed to the VM.  In fact, with a "Generation 2"
+ * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
+ * until a device has been exposed using this driver.
+ *
+ * Each root PCI bus has its own PCI domain, which is called "Segment" in
+ * the PCI Firmware Specifications.  Thus while each device passed through
+ * to the VM using this front-end will appear at "device 0", the domain will
+ * be unique.  Typically, each bus will have one PCI function on it, though
+ * this driver does support more than one.
+ *
+ * In order to map the interrupts from the device through to the guest VM,
+ * this driver also implements an IRQ Domain, which handles interrupts (either
+ * MSI or MSI-X) associated with the functions on the bus.  As interrupts are
+ * set up, torn down, or reaffined, this driver communicates with the
+ * underlying hypervisor to adjust the mappings in the I/O MMU so that each
+ * interrupt will be delivered to the correct virtual processor at the right
+ * vector.  This driver does not support level-triggered (line-based)
+ * interrupts, and will report that the Interrupt Line register in the
+ * function's configuration space is zero.
+ *
+ * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
+ * facilities.  For instance, the configuration space of a function exposed
+ * by Hyper-V is mapped into a single page of memory space, and the
+ * read and write handlers for config space must be aware of this mechanism.
+ * Similarly, device setup and teardown involves messages sent to and from
+ * the PCI back-end driver in Hyper-V.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/irqdomain.h>
+#include <asm/irqdomain.h>
+#include <asm/apic.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/hyperv.h>
+#include <linux/refcount.h>
+#include <asm/mshyperv.h>
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the
+ * major version.
+ */
+
+#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
+#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
+#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
+
+enum pci_protocol_version_t {
+	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
+	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
+};
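+
+/*
+ * For illustration: PCI_PROTOCOL_VERSION_1_2 encodes as
+ * PCI_MAKE_VERSION(1, 2) == 0x00010002, from which PCI_MAJOR_VERSION()
+ * recovers 1 and PCI_MINOR_VERSION() recovers 2.
+ */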
+
+#define CPU_AFFINITY_ALL	-1ULL
+
+/*
+ * Supported protocol versions in the order of probing - highest first.
+ */
+static enum pci_protocol_version_t pci_protocol_versions[] = {
+	PCI_PROTOCOL_VERSION_1_2,
+	PCI_PROTOCOL_VERSION_1_1,
+};
+
+/*
+ * Protocol version negotiated by hv_pci_protocol_negotiation().
+ */
+static enum pci_protocol_version_t pci_protocol_version;
+
+#define PCI_CONFIG_MMIO_LENGTH	0x2000
+#define CFG_PAGE_OFFSET 0x1000
+#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
+
+#define MAX_SUPPORTED_MSI_MESSAGES 0x400
+
+#define STATUS_REVISION_MISMATCH 0xC0000059
+
+/* space for a 32-bit serial number as a string */
+#define SLOT_NAME_SIZE 11
+
+/*
+ * Message Types
+ */
+
+enum pci_message_type {
+	/*
+	 * Version 1.1
+	 */
+	PCI_MESSAGE_BASE                = 0x42490000,
+	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
+	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
+	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
+	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
+	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
+	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
+	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
+	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
+	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
+	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
+	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
+	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
+	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
+	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
+	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
+	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
+	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
+	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
+	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
+	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
+	PCI_RESOURCES_ASSIGNED2		= PCI_MESSAGE_BASE + 0x16,
+	PCI_CREATE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x17,
+	PCI_DELETE_INTERRUPT_MESSAGE2	= PCI_MESSAGE_BASE + 0x18, /* unused */
+	PCI_MESSAGE_MAXIMUM
+};
+
+/*
+ * Structures defining the virtual PCI Express protocol.
+ */
+
+union pci_version {
+	struct {
+		u16 minor_version;
+		u16 major_version;
+	} parts;
+	u32 version;
+} __packed;
+
+/*
+ * Function numbers are 8 bits wide on Express, as interpreted through ARI,
+ * which is all this driver does.  This representation is the one used in
+ * Windows, which is what is expected when sending this back and forth with
+ * the Hyper-V parent partition.
+ */
+union win_slot_encoding {
+	struct {
+		u32	dev:5;
+		u32	func:3;
+		u32	reserved:24;
+	} bits;
+	u32 slot;
+} __packed;
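+
+/*
+ * For illustration (assuming the little-endian bitfield layout used
+ * here): Linux devfn 0x0b (device 1, function 3) maps to
+ * wslot.bits.dev = 1 and wslot.bits.func = 3, i.e. wslot.slot =
+ * 1 | (3 << 5) = 0x61; see devfn_to_wslot() and wslot_to_devfn() below.
+ */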
+
+/*
+ * Pretty much as defined in the PCI Specifications.
+ */
+struct pci_function_description {
+	u16	v_id;	/* vendor ID */
+	u16	d_id;	/* device ID */
+	u8	rev;
+	u8	prog_intf;
+	u8	subclass;
+	u8	base_class;
+	u32	subsystem_id;
+	union win_slot_encoding win_slot;
+	u32	ser;	/* serial number */
+} __packed;
+
+/**
+ * struct hv_msi_desc
+ * @vector:		IDT entry
+ * @delivery_mode:	As defined in Intel's Programmer's
+ *			Reference Manual, Volume 3, Chapter 8.
+ * @vector_count:	Number of contiguous entries in the
+ *			Interrupt Descriptor Table that are
+ *			occupied by this Message-Signaled
+ *			Interrupt. For "MSI", as first defined
+ *			in PCI 2.2, this can be between 1 and
+ *			32. For "MSI-X," as first defined in PCI
+ *			3.0, this must be 1, as each MSI-X table
+ *			entry would have its own descriptor.
+ * @reserved:		Empty space
+ * @cpu_mask:		All the target virtual processors.
+ */
+struct hv_msi_desc {
+	u8	vector;
+	u8	delivery_mode;
+	u16	vector_count;
+	u32	reserved;
+	u64	cpu_mask;
+} __packed;
+
+/**
+ * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
+ * @vector:		IDT entry
+ * @delivery_mode:	As defined in Intel's Programmer's
+ *			Reference Manual, Volume 3, Chapter 8.
+ * @vector_count:	Number of contiguous entries in the
+ *			Interrupt Descriptor Table that are
+ *			occupied by this Message-Signaled
+ *			Interrupt. For "MSI", as first defined
+ *			in PCI 2.2, this can be between 1 and
+ *			32. For "MSI-X," as first defined in PCI
+ *			3.0, this must be 1, as each MSI-X table
+ *			entry would have its own descriptor.
+ * @processor_count:	Number of valid entries in @processor_array.
+ * @processor_array:	All the target virtual processors.
+ */
+struct hv_msi_desc2 {
+	u8	vector;
+	u8	delivery_mode;
+	u16	vector_count;
+	u16	processor_count;
+	u16	processor_array[32];
+} __packed;
+
+/**
+ * struct tran_int_desc
+ * @reserved:		unused, padding
+ * @vector_count:	same as in hv_msi_desc
+ * @data:		This is the "data payload" value that is
+ *			written by the device when it generates
+ *			a message-signaled interrupt, either MSI
+ *			or MSI-X.
+ * @address:		This is the address to which the data
+ *			payload is written on interrupt
+ *			generation.
+ */
+struct tran_int_desc {
+	u16	reserved;
+	u16	vector_count;
+	u32	data;
+	u64	address;
+} __packed;
+
+/*
+ * A generic message format for virtual PCI.
+ * Specific message formats are defined later in the file.
+ */
+
+struct pci_message {
+	u32 type;
+} __packed;
+
+struct pci_child_message {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+} __packed;
+
+struct pci_incoming_message {
+	struct vmpacket_descriptor hdr;
+	struct pci_message message_type;
+} __packed;
+
+struct pci_response {
+	struct vmpacket_descriptor hdr;
+	s32 status;			/* negative values are failures */
+} __packed;
+
+struct pci_packet {
+	void (*completion_func)(void *context, struct pci_response *resp,
+				int resp_packet_size);
+	void *compl_ctxt;
+
+	struct pci_message message[0];
+};
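+
+/*
+ * pci_packet is a header for a variable-sized message, so senders embed
+ * it in a local structure sized for the specific message, e.g. (as done
+ * throughout this file):
+ *
+ *	struct {
+ *		struct pci_packet pkt;
+ *		u8 buffer[sizeof(struct pci_delete_interrupt)];
+ *	} ctxt;
+ */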
+
+/*
+ * Specific message types supporting the PCI protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * pci_version: The protocol version requested.
+ * is_last_attempt: If TRUE, this is the last version the guest will request.
+ * reservedz: Reserved field, set to zero.
+ */
+
+struct pci_version_request {
+	struct pci_message message_type;
+	u32 protocol_version;
+} __packed;
+
+/*
+ * Bus D0 Entry.  This is sent from the guest to the host when the virtual
+ * bus (PCI Express port) is ready for action.
+ */
+
+struct pci_bus_d0_entry {
+	struct pci_message message_type;
+	u32 reserved;
+	u64 mmio_base;
+} __packed;
+
+struct pci_bus_relations {
+	struct pci_incoming_message incoming;
+	u32 device_count;
+	struct pci_function_description func[0];
+} __packed;
+
+struct pci_q_res_req_response {
+	struct vmpacket_descriptor hdr;
+	s32 status;			/* negative values are failures */
+	u32 probed_bar[6];
+} __packed;
+
+struct pci_set_power {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u32 power_state;		/* In Windows terms */
+	u32 reserved;
+} __packed;
+
+struct pci_set_power_response {
+	struct vmpacket_descriptor hdr;
+	s32 status;			/* negative values are failures */
+	union win_slot_encoding wslot;
+	u32 resultant_state;		/* In Windows terms */
+	u32 reserved;
+} __packed;
+
+struct pci_resources_assigned {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u8 memory_range[0x14][6];	/* not used here */
+	u32 msi_descriptors;
+	u32 reserved[4];
+} __packed;
+
+struct pci_resources_assigned2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u8 memory_range[0x14][6];	/* not used here */
+	u32 msi_descriptor_count;
+	u8 reserved[70];
+} __packed;
+
+struct pci_create_interrupt {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	struct hv_msi_desc int_desc;
+} __packed;
+
+struct pci_create_int_response {
+	struct pci_response response;
+	u32 reserved;
+	struct tran_int_desc int_desc;
+} __packed;
+
+struct pci_create_interrupt2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	struct hv_msi_desc2 int_desc;
+} __packed;
+
+struct pci_delete_interrupt {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	struct tran_int_desc int_desc;
+} __packed;
+
+/*
+ * Note: the VM must pass a valid block id, wslot and bytes_requested.
+ */
+struct pci_read_block {
+	struct pci_message message_type;
+	u32 block_id;
+	union win_slot_encoding wslot;
+	u32 bytes_requested;
+} __packed;
+
+struct pci_read_block_response {
+	struct vmpacket_descriptor hdr;
+	u32 status;
+	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
+
+/*
+ * Note: the VM must pass a valid block id, wslot and byte_count.
+ */
+struct pci_write_block {
+	struct pci_message message_type;
+	u32 block_id;
+	union win_slot_encoding wslot;
+	u32 byte_count;
+	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
+
+struct pci_dev_inval_block {
+	struct pci_incoming_message incoming;
+	union win_slot_encoding wslot;
+	u64 block_mask;
+} __packed;
+
+struct pci_dev_incoming {
+	struct pci_incoming_message incoming;
+	union win_slot_encoding wslot;
+} __packed;
+
+struct pci_eject_response {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u32 status;
+} __packed;
+
+static int pci_ring_size = (4 * PAGE_SIZE);
+
+/*
+ * Definitions for the interrupt steering hypercall.
+ */
+#define HV_PARTITION_ID_SELF		((u64)-1)
+#define HVCALL_RETARGET_INTERRUPT	0x7e
+
+struct hv_interrupt_entry {
+	u32	source;			/* 1 for MSI(-X) */
+	u32	reserved1;
+	u32	address;
+	u32	data;
+};
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST		1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET	2
+
+struct hv_device_interrupt_target {
+	u32	vector;
+	u32	flags;
+	union {
+		u64		 vp_mask;
+		struct hv_vpset vp_set;
+	};
+};
+
+struct retarget_msi_interrupt {
+	u64	partition_id;		/* use "self" */
+	u64	device_id;
+	struct hv_interrupt_entry int_entry;
+	u64	reserved2;
+	struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+/*
+ * Driver specific state.
+ */
+
+enum hv_pcibus_state {
+	hv_pcibus_init = 0,
+	hv_pcibus_probed,
+	hv_pcibus_installed,
+	hv_pcibus_removed,
+	hv_pcibus_maximum
+};
+
+struct hv_pcibus_device {
+	struct pci_sysdata sysdata;
+	enum hv_pcibus_state state;
+	refcount_t remove_lock;
+	struct hv_device *hdev;
+	resource_size_t low_mmio_space;
+	resource_size_t high_mmio_space;
+	struct resource *mem_config;
+	struct resource *low_mmio_res;
+	struct resource *high_mmio_res;
+	struct completion *survey_event;
+	struct completion remove_event;
+	struct pci_bus *pci_bus;
+	spinlock_t config_lock;	/* Avoid two threads writing index page */
+	spinlock_t device_list_lock;	/* Protect lists below */
+	void __iomem *cfg_addr;
+
+	struct list_head resources_for_children;
+
+	struct list_head children;
+	struct list_head dr_list;
+
+	struct msi_domain_info msi_info;
+	struct msi_controller msi_chip;
+	struct irq_domain *irq_domain;
+
+	spinlock_t retarget_msi_interrupt_lock;
+
+	struct workqueue_struct *wq;
+
+	/* hypercall arg, must not cross page boundary */
+	struct retarget_msi_interrupt retarget_msi_interrupt_params;
+
+	/*
+	 * Don't put anything here: retarget_msi_interrupt_params must be last
+	 */
+};
+
+/*
+ * Tracks "Device Relations" messages from the host, which must be both
+ * processed in order and deferred so that they don't run in the context
+ * of the incoming packet callback.
+ */
+struct hv_dr_work {
+	struct work_struct wrk;
+	struct hv_pcibus_device *bus;
+};
+
+struct hv_dr_state {
+	struct list_head list_entry;
+	u32 device_count;
+	struct pci_function_description func[0];
+};
+
+enum hv_pcichild_state {
+	hv_pcichild_init = 0,
+	hv_pcichild_requirements,
+	hv_pcichild_resourced,
+	hv_pcichild_ejecting,
+	hv_pcichild_maximum
+};
+
+struct hv_pci_dev {
+	/* List protected by pci_rescan_remove_lock */
+	struct list_head list_entry;
+	refcount_t refs;
+	enum hv_pcichild_state state;
+	struct pci_slot *pci_slot;
+	struct pci_function_description desc;
+	bool reported_missing;
+	struct hv_pcibus_device *hbus;
+	struct work_struct wrk;
+
+	void (*block_invalidate)(void *context, u64 block_mask);
+	void *invalidate_context;
+
+	/*
+	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
+	 * read it back, for each of the BAR offsets within config space.
+	 */
+	u32 probed_bar[6];
+};
+
+struct hv_pci_compl {
+	struct completion host_event;
+	s32 completion_status;
+};
+
+static void hv_pci_onchannelcallback(void *context);
+
+/**
+ * hv_pci_generic_compl() - Invoked for a completion packet
+ * @context:		Set up by the sender of the packet.
+ * @resp:		The response packet
+ * @resp_packet_size:	Size in bytes of the packet
+ *
+ * This function is used to trigger an event and report status
+ * for any message for which the completion packet contains a
+ * status and nothing else.
+ */
+static void hv_pci_generic_compl(void *context, struct pci_response *resp,
+				 int resp_packet_size)
+{
+	struct hv_pci_compl *comp_pkt = context;
+
+	if (resp_packet_size >= offsetofend(struct pci_response, status))
+		comp_pkt->completion_status = resp->status;
+	else
+		comp_pkt->completion_status = -1;
+
+	complete(&comp_pkt->host_event);
+}
+
+static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+						u32 wslot);
+
+static void get_pcichild(struct hv_pci_dev *hpdev)
+{
+	refcount_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev)
+{
+	if (refcount_dec_and_test(&hpdev->refs))
+		kfree(hpdev);
+}
+
+static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+
+/*
+ * There is no good way to get notified from vmbus_onoffer_rescind(),
+ * so let's use polling here, since this is not a hot path.
+ */
+static int wait_for_response(struct hv_device *hdev,
+			     struct completion *comp)
+{
+	while (true) {
+		if (hdev->channel->rescind) {
+			dev_warn_once(&hdev->device, "The device is gone.\n");
+			return -ENODEV;
+		}
+
+		if (wait_for_completion_timeout(comp, HZ / 10))
+			break;
+	}
+
+	return 0;
+}
+
+/**
+ * devfn_to_wslot() - Convert from Linux PCI slot to Windows
+ * @devfn:	The Linux representation of PCI slot
+ *
+ * Windows uses a slightly different representation of PCI slot.
+ *
+ * Return: The Windows representation
+ */
+static u32 devfn_to_wslot(int devfn)
+{
+	union win_slot_encoding wslot;
+
+	wslot.slot = 0;
+	wslot.bits.dev = PCI_SLOT(devfn);
+	wslot.bits.func = PCI_FUNC(devfn);
+
+	return wslot.slot;
+}
+
+/**
+ * wslot_to_devfn() - Convert from Windows PCI slot to Linux
+ * @wslot:	The Windows representation of PCI slot
+ *
+ * Windows uses a slightly different representation of PCI slot.
+ *
+ * Return: The Linux representation
+ */
+static int wslot_to_devfn(u32 wslot)
+{
+	union win_slot_encoding slot_no;
+
+	slot_no.slot = wslot;
+	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
+}
+
+/*
+ * PCI Configuration Space for these root PCI buses is implemented as a pair
+ * of pages in memory-mapped I/O space.  Writing to the first page chooses
+ * the PCI function being written or read.  Once the first page has been
+ * written to, the following page maps in the entire configuration space of
+ * the function.
+ */
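+
+/*
+ * A minimal sketch of an access under that scheme (locking and memory
+ * barriers omitted; the real sequence is in _hv_pcifront_read_config()
+ * below):
+ *
+ *	writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
+ *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);
+ */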
+
+/**
+ * _hv_pcifront_read_config() - Internal PCI config read
+ * @hpdev:	The PCI driver's representation of the device
+ * @where:	Offset within config space
+ * @size:	Size of the transfer
+ * @val:	Pointer to the buffer receiving the data
+ */
+static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
+				     int size, u32 *val)
+{
+	unsigned long flags;
+	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
+
+	/*
+	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
+	 */
+	if (where + size <= PCI_COMMAND) {
+		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
+	} else if (where >= PCI_CLASS_REVISION && where + size <=
+		   PCI_CACHE_LINE_SIZE) {
+		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
+		       PCI_CLASS_REVISION, size);
+	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
+		   PCI_ROM_ADDRESS) {
+		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
+		       PCI_SUBSYSTEM_VENDOR_ID, size);
+	} else if (where >= PCI_ROM_ADDRESS && where + size <=
+		   PCI_CAPABILITY_LIST) {
+		/* ROM BARs are unimplemented */
+		*val = 0;
+	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
+		/*
+		 * Interrupt Line and Interrupt PIN are hard-wired to zero
+		 * because this front-end only supports message-signaled
+		 * interrupts.
+		 */
+		*val = 0;
+	} else if (where + size <= CFG_PAGE_SIZE) {
+		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+		/* Choose the function to be read. (See comment above) */
+		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+		/* Make sure the function was chosen before we start reading. */
+		mb();
+		/* Read from that function's config space. */
+		switch (size) {
+		case 1:
+			*val = readb(addr);
+			break;
+		case 2:
+			*val = readw(addr);
+			break;
+		default:
+			*val = readl(addr);
+			break;
+		}
+		/*
+		 * Make sure the read was done before we release the spinlock
+		 * allowing consecutive reads/writes.
+		 */
+		mb();
+		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+	} else {
+		dev_err(&hpdev->hbus->hdev->device,
+			"Attempt to read beyond a function's config space.\n");
+	}
+}
+
+static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
+{
+	u16 ret;
+	unsigned long flags;
+	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
+			     PCI_VENDOR_ID;
+
+	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+
+	/* Choose the function to be read. (See comment above) */
+	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+	/* Make sure the function was chosen before we start reading. */
+	mb();
+	/* Read from that function's config space. */
+	ret = readw(addr);
+	/*
+	 * mb() is not required here, because the spin_unlock_irqrestore()
+	 * is a barrier.
+	 */
+
+	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+
+	return ret;
+}
+
+/**
+ * _hv_pcifront_write_config() - Internal PCI config write
+ * @hpdev:	The PCI driver's representation of the device
+ * @where:	Offset within config space
+ * @size:	Size of the transfer
+ * @val:	The data being transferred
+ */
+static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
+				      int size, u32 val)
+{
+	unsigned long flags;
+	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
+
+	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
+	    where + size <= PCI_CAPABILITY_LIST) {
+		/* SSIDs and ROM BARs are read-only */
+	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
+		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+		/* Choose the function to be written. (See comment above) */
+		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+		/* Make sure the function was chosen before we start writing. */
+		wmb();
+		/* Write to that function's config space. */
+		switch (size) {
+		case 1:
+			writeb(val, addr);
+			break;
+		case 2:
+			writew(val, addr);
+			break;
+		default:
+			writel(val, addr);
+			break;
+		}
+		/*
+		 * Make sure the write was done before we release the spinlock
+		 * allowing consecutive reads/writes.
+		 */
+		mb();
+		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+	} else {
+		dev_err(&hpdev->hbus->hdev->device,
+			"Attempt to write beyond a function's config space.\n");
+	}
+}
+
+/**
+ * hv_pcifront_read_config() - Read configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be read
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ *	   PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 *val)
+{
+	struct hv_pcibus_device *hbus =
+		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
+	struct hv_pci_dev *hpdev;
+
+	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
+	if (!hpdev)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	_hv_pcifront_read_config(hpdev, where, size, val);
+
+	put_pcichild(hpdev);
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * hv_pcifront_write_config() - Write configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be written to device
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ *	   PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 val)
+{
+	struct hv_pcibus_device *hbus =
+	    container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
+	struct hv_pci_dev *hpdev;
+
+	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
+	if (!hpdev)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	_hv_pcifront_write_config(hpdev, where, size, val);
+
+	put_pcichild(hpdev);
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCIe operations */
+static struct pci_ops hv_pcifront_ops = {
+	.read  = hv_pcifront_read_config,
+	.write = hv_pcifront_write_config,
+};
+
+/*
+ * Paravirtual backchannel
+ *
+ * Hyper-V SR-IOV provides a backchannel mechanism in software for
+ * communication between a VF driver and a PF driver.  These
+ * "configuration blocks" are similar in concept to PCI configuration space,
+ * but instead of doing reads and writes in 32-bit chunks through a very slow
+ * path, packets of up to 128 bytes can be sent or received asynchronously.
+ *
+ * Nearly every SR-IOV device contains just such a communications channel in
+ * hardware, so using this one in software is usually optional.  Using the
+ * software channel, however, allows driver implementers to leverage software
+ * tools that fuzz the communications channel looking for vulnerabilities.
+ *
+ * The usage model for these packets puts the responsibility for reading or
+ * writing on the VF driver.  The VF driver sends a read or a write packet,
+ * indicating which "block" is being referred to by number.
+ *
+ * If the PF driver wishes to initiate communication, it can "invalidate" one or
+ * more of the first 64 blocks.  This invalidation is delivered via a callback
+ * supplied by the VF driver to this driver.
+ *
+ * No protocol is implied, except that supplied by the PF and VF drivers.
+ */
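+
+/*
+ * For illustration, a VF driver might exercise the backchannel like
+ * this (the block ID and buffer are illustrative - block IDs and their
+ * meaning are private to the PF/VF driver pair - and error handling is
+ * elided):
+ *
+ *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
+ *	unsigned int returned;
+ *
+ *	hv_read_config_block(pdev, buf, sizeof(buf), 0, &returned);
+ *	hv_write_config_block(pdev, buf, returned, 0);
+ */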
+
+struct hv_read_config_compl {
+	struct hv_pci_compl comp_pkt;
+	void *buf;
+	unsigned int len;
+	unsigned int bytes_returned;
+};
+
+/**
+ * hv_pci_read_config_compl() - Invoked when a response packet
+ * for a read config block operation arrives.
+ * @context:		Identifies the read config operation
+ * @resp:		The response packet itself
+ * @resp_packet_size:	Size in bytes of the response packet
+ */
+static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
+				     int resp_packet_size)
+{
+	struct hv_read_config_compl *comp = context;
+	struct pci_read_block_response *read_resp =
+		(struct pci_read_block_response *)resp;
+	unsigned int data_len, hdr_len;
+
+	hdr_len = offsetof(struct pci_read_block_response, bytes);
+	if (resp_packet_size < hdr_len) {
+		comp->comp_pkt.completion_status = -1;
+		goto out;
+	}
+
+	data_len = resp_packet_size - hdr_len;
+	if (data_len > 0 && read_resp->status == 0) {
+		comp->bytes_returned = min(comp->len, data_len);
+		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
+	} else {
+		comp->bytes_returned = 0;
+	}
+
+	comp->comp_pkt.completion_status = read_resp->status;
+out:
+	complete(&comp->comp_pkt.host_event);
+}
+
+/**
+ * hv_read_config_block() - Sends a read config block request to
+ * the back-end driver running in the Hyper-V parent partition.
+ * @pdev:		The PCI driver's representation for this device.
+ * @buf:		Buffer into which the config block will be copied.
+ * @len:		Size in bytes of buf.
+ * @block_id:		Identifies the config block which has been requested.
+ * @bytes_returned:	Size which came back from the back-end driver.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
+			 unsigned int block_id, unsigned int *bytes_returned)
+{
+	struct hv_pcibus_device *hbus =
+		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+			     sysdata);
+	struct {
+		struct pci_packet pkt;
+		char buf[sizeof(struct pci_read_block)];
+	} pkt;
+	struct hv_read_config_compl comp_pkt;
+	struct pci_read_block *read_blk;
+	int ret;
+
+	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+		return -EINVAL;
+
+	init_completion(&comp_pkt.comp_pkt.host_event);
+	comp_pkt.buf = buf;
+	comp_pkt.len = len;
+
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.pkt.completion_func = hv_pci_read_config_compl;
+	pkt.pkt.compl_ctxt = &comp_pkt;
+	read_blk = (struct pci_read_block *)&pkt.pkt.message;
+	read_blk->message_type.type = PCI_READ_BLOCK;
+	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+	read_blk->block_id = block_id;
+	read_blk->bytes_requested = len;
+
+	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
+			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
+			       VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (ret)
+		return ret;
+
+	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
+	if (ret)
+		return ret;
+
+	if (comp_pkt.comp_pkt.completion_status != 0 ||
+	    comp_pkt.bytes_returned == 0) {
+		dev_err(&hbus->hdev->device,
+			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
+			comp_pkt.comp_pkt.completion_status,
+			comp_pkt.bytes_returned);
+		return -EIO;
+	}
+
+	*bytes_returned = comp_pkt.bytes_returned;
+	return 0;
+}
+
+/**
+ * hv_pci_write_config_compl() - Invoked when a response packet for a write
+ * config block operation arrives.
+ * @context:		Identifies the write config operation
+ * @resp:		The response packet itself
+ * @resp_packet_size:	Size in bytes of the response packet
+ */
+static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
+				      int resp_packet_size)
+{
+	struct hv_pci_compl *comp_pkt = context;
+
+	comp_pkt->completion_status = resp->status;
+	complete(&comp_pkt->host_event);
+}
+
+/**
+ * hv_write_config_block() - Sends a write config block request to the
+ * back-end driver running in the Hyper-V parent partition.
+ * @pdev:		The PCI driver's representation for this device.
+ * @buf:		Buffer from which the config block will be copied.
+ * @len:		Size in bytes of buf.
+ * @block_id:		Identifies the config block which is being written.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
+			  unsigned int block_id)
+{
+	struct hv_pcibus_device *hbus =
+		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+			     sysdata);
+	struct {
+		struct pci_packet pkt;
+		char buf[sizeof(struct pci_write_block)];
+		u32 reserved;
+	} pkt;
+	struct hv_pci_compl comp_pkt;
+	struct pci_write_block *write_blk;
+	u32 pkt_size;
+	int ret;
+
+	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+		return -EINVAL;
+
+	init_completion(&comp_pkt.host_event);
+
+	memset(&pkt, 0, sizeof(pkt));
+	pkt.pkt.completion_func = hv_pci_write_config_compl;
+	pkt.pkt.compl_ctxt = &comp_pkt;
+	write_blk = (struct pci_write_block *)&pkt.pkt.message;
+	write_blk->message_type.type = PCI_WRITE_BLOCK;
+	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+	write_blk->block_id = block_id;
+	write_blk->byte_count = len;
+	memcpy(write_blk->bytes, buf, len);
+	pkt_size = offsetof(struct pci_write_block, bytes) + len;
+	/*
+	 * This quirk is required on some hosts shipped around 2018, because
+	 * these hosts don't check the pkt_size correctly (new hosts have been
+	 * fixed since early 2019). The quirk is also safe on very old hosts
+	 * and new hosts, because, on them, what really matters is the length
+	 * specified in write_blk->byte_count.
+	 */
+	pkt_size += sizeof(pkt.reserved);
+
+	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
+			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (ret)
+		return ret;
+
+	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
+	if (ret)
+		return ret;
+
+	if (comp_pkt.completion_status != 0) {
+		dev_err(&hbus->hdev->device,
+			"Write Config Block failed: 0x%x\n",
+			comp_pkt.completion_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * hv_register_block_invalidate() - Register a callback to be invoked when a
+ * config block invalidation arrives from the back-end driver.
+ * @pdev:		The PCI driver's representation for this device.
+ * @context:		Identifies the device.
+ * @block_invalidate:	Callback invoked with a mask identifying the blocks
+ *			being invalidated.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
+				 void (*block_invalidate)(void *context,
+							  u64 block_mask))
+{
+	struct hv_pcibus_device *hbus =
+		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+			     sysdata);
+	struct hv_pci_dev *hpdev;
+
+	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+	if (!hpdev)
+		return -ENODEV;
+
+	hpdev->block_invalidate = block_invalidate;
+	hpdev->invalidate_context = context;
+
+	put_pcichild(hpdev);
+	return 0;
+}
+
+/* Interrupt management hooks */
+static void hv_int_desc_free(struct hv_pci_dev *hpdev,
+			     struct tran_int_desc *int_desc)
+{
+	struct pci_delete_interrupt *int_pkt;
+	struct {
+		struct pci_packet pkt;
+		u8 buffer[sizeof(struct pci_delete_interrupt)];
+	} ctxt;
+
+	if (!int_desc->vector_count) {
+		kfree(int_desc);
+		return;
+	}
+	memset(&ctxt, 0, sizeof(ctxt));
+	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+	int_pkt->message_type.type =
+		PCI_DELETE_INTERRUPT_MESSAGE;
+	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+	int_pkt->int_desc = *int_desc;
+	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
+			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
+	kfree(int_desc);
+}
+
+/**
+ * hv_msi_free() - Free the MSI.
+ * @domain:	The interrupt domain pointer
+ * @info:	Extra MSI-related context
+ * @irq:	Identifies the IRQ.
+ *
+ * The Hyper-V parent partition and hypervisor are tracking the
+ * messages that are in use, keeping the interrupt redirection
+ * table up to date.  This callback sends a message that frees
+ * the IRT entry and related tracking state.
+ */
+static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
+			unsigned int irq)
+{
+	struct hv_pcibus_device *hbus;
+	struct hv_pci_dev *hpdev;
+	struct pci_dev *pdev;
+	struct tran_int_desc *int_desc;
+	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
+	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
+
+	pdev = msi_desc_to_pci_dev(msi);
+	hbus = info->data;
+	int_desc = irq_data_get_irq_chip_data(irq_data);
+	if (!int_desc)
+		return;
+
+	irq_data->chip_data = NULL;
+	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+	if (!hpdev) {
+		kfree(int_desc);
+		return;
+	}
+
+	hv_int_desc_free(hpdev, int_desc);
+	put_pcichild(hpdev);
+}
+
+static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
+			   bool force)
+{
+	struct irq_data *parent = data->parent_data;
+
+	return parent->chip->irq_set_affinity(parent, dest, force);
+}
+
+static void hv_irq_mask(struct irq_data *data)
+{
+	pci_msi_mask_irq(data);
+}
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+	struct irq_cfg *cfg = irqd_cfg(data);
+
+	return cfg->vector;
+}
+
+static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
+			  int nvec, msi_alloc_info_t *info)
+{
+	int ret = pci_msi_prepare(domain, dev, nvec, info);
+
+	/*
+	 * Because the interrupt remapper in the hypervisor IOMMU is used,
+	 * contiguous CPU vectors are not needed for multi-MSI.
+	 */
+	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+	return ret;
+}
+
+/**
+ * hv_irq_unmask() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data:	Describes the IRQ
+ *
+ * Build a new destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+static void hv_irq_unmask(struct irq_data *data)
+{
+	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+	struct irq_cfg *cfg = irqd_cfg(data);
+	struct retarget_msi_interrupt *params;
+	struct tran_int_desc *int_desc;
+	struct hv_pcibus_device *hbus;
+	struct cpumask *dest;
+	cpumask_var_t tmp;
+	struct pci_bus *pbus;
+	struct pci_dev *pdev;
+	unsigned long flags;
+	u32 var_size = 0;
+	int cpu, nr_bank;
+	u64 res;
+
+	dest = irq_data_get_effective_affinity_mask(data);
+	pdev = msi_desc_to_pci_dev(msi_desc);
+	pbus = pdev->bus;
+	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+	int_desc = data->chip_data;
+
+	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+	params = &hbus->retarget_msi_interrupt_params;
+	memset(params, 0, sizeof(*params));
+	params->partition_id = HV_PARTITION_ID_SELF;
+	params->int_entry.source = 1; /* MSI(-X) */
+	params->int_entry.address = int_desc->address & 0xffffffff;
+	params->int_entry.data = int_desc->data;
+	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+			   (hbus->hdev->dev_instance.b[4] << 16) |
+			   (hbus->hdev->dev_instance.b[7] << 8) |
+			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
+			   PCI_FUNC(pdev->devfn);
+	params->int_target.vector = cfg->vector;
+
+	/*
+	 * Honoring apic->irq_delivery_mode set to dest_Fixed by
+	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+	 * spurious interrupt storm. Not doing so does not seem to have a
+	 * negative effect (yet?).
+	 */
+
+	if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+		/*
+		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+		 * with >64 VP support.
+		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+		 * is not sufficient for this hypercall.
+		 */
+		params->int_target.flags |=
+			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+
+		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
+			res = 1;
+			goto exit_unlock;
+		}
+
+		cpumask_and(tmp, dest, cpu_online_mask);
+		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
+		free_cpumask_var(tmp);
+
+		if (nr_bank <= 0) {
+			res = 1;
+			goto exit_unlock;
+		}
+
+		/*
+		 * var-sized hypercall, var-size starts after vp_mask (thus
+		 * vp_set.format does not count, but vp_set.valid_bank_mask
+		 * does).
+		 */
+		var_size = 1 + nr_bank;
+	} else {
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			params->int_target.vp_mask |=
+				(1ULL << hv_cpu_number_to_vp_number(cpu));
+		}
+	}
+
+	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+			      params, NULL);
+
+exit_unlock:
+	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+
+	if (res) {
+		dev_err(&hbus->hdev->device,
+			"%s() failed: %#llx", __func__, res);
+		return;
+	}
+
+	pci_msi_unmask_irq(data);
+}
+
+struct compose_comp_ctxt {
+	struct hv_pci_compl comp_pkt;
+	struct tran_int_desc int_desc;
+};
+
+static void hv_pci_compose_compl(void *context, struct pci_response *resp,
+				 int resp_packet_size)
+{
+	struct compose_comp_ctxt *comp_pkt = context;
+	struct pci_create_int_response *int_resp =
+		(struct pci_create_int_response *)resp;
+
+	comp_pkt->comp_pkt.completion_status = resp->status;
+	comp_pkt->int_desc = int_resp->int_desc;
+	complete(&comp_pkt->comp_pkt.host_event);
+}
+
+static u32 hv_compose_msi_req_v1(
+	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector, u8 vector_count)
+{
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = vector_count;
+	int_pkt->int_desc.delivery_mode = dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
+	 * hv_irq_unmask().
+	 */
+	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+
+	return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v2(
+	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector, u8 vector_count)
+{
+	int cpu;
+
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = vector_count;
+	int_pkt->int_desc.delivery_mode = dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+	 * by subsequent retarget in hv_irq_unmask().
+	 */
+	cpu = cpumask_first_and(affinity, cpu_online_mask);
+	int_pkt->int_desc.processor_array[0] =
+		hv_cpu_number_to_vp_number(cpu);
+	int_pkt->int_desc.processor_count = 1;
+
+	return sizeof(*int_pkt);
+}
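+
+/*
+ * The difference between the two request builders above: v1 carries a
+ * 64-bit vCPU bitmap (cpu_mask), while v2 carries an explicit array of
+ * virtual-processor numbers, which is what lets protocol 1.2 target
+ * systems with more than 64 VPs (see hv_irq_unmask() above).
+ */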
+
+/**
+ * hv_compose_msi_msg() - Supplies a valid MSI address/data
+ * @data:	Everything about this MSI
+ * @msg:	Buffer that is filled in by this function
+ *
+ * This function unpacks the IRQ looking for target CPU set, IDT
+ * vector and mode and sends a message to the parent partition
+ * asking for a mapping for that tuple in this partition.  The
+ * response supplies a data value and address to which that data
+ * should be written to trigger that interrupt.
+ */
+static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct hv_pcibus_device *hbus;
+	struct hv_pci_dev *hpdev;
+	struct pci_bus *pbus;
+	struct pci_dev *pdev;
+	struct cpumask *dest;
+	unsigned long flags;
+	struct compose_comp_ctxt comp;
+	struct tran_int_desc *int_desc;
+	struct msi_desc *msi_desc;
+	u8 vector, vector_count;
+	struct {
+		struct pci_packet pci_pkt;
+		union {
+			struct pci_create_interrupt v1;
+			struct pci_create_interrupt2 v2;
+		} int_pkts;
+	} __packed ctxt;
+
+	u32 size;
+	int ret;
+
+	/* Reuse the previous allocation */
+	if (data->chip_data) {
+		int_desc = data->chip_data;
+		msg->address_hi = int_desc->address >> 32;
+		msg->address_lo = int_desc->address & 0xffffffff;
+		msg->data = int_desc->data;
+		return;
+	}
+
+	msi_desc  = irq_data_get_msi_desc(data);
+	pdev = msi_desc_to_pci_dev(msi_desc);
+	dest = irq_data_get_effective_affinity_mask(data);
+	pbus = pdev->bus;
+	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+	if (!hpdev)
+		goto return_null_message;
+
+	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
+	if (!int_desc)
+		goto drop_reference;
+
+	if (!msi_desc->msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+		/*
+		 * If this is not the first MSI of a multi-MSI allocation, we
+		 * already have a mapping and can exit early.
+		 */
+		if (msi_desc->irq != data->irq) {
+			data->chip_data = int_desc;
+			int_desc->address = msi_desc->msg.address_lo |
+					    (u64)msi_desc->msg.address_hi << 32;
+			int_desc->data = msi_desc->msg.data +
+					 (data->irq - msi_desc->irq);
+			msg->address_hi = msi_desc->msg.address_hi;
+			msg->address_lo = msi_desc->msg.address_lo;
+			msg->data = int_desc->data;
+			put_pcichild(hpdev);
+			return;
+		}
+		/*
+		 * The vector we select here is a dummy value.  The correct
+		 * value gets sent to the hypervisor in unmask().  This needs
+		 * to be aligned with the count, and also not zero.  Multi-MSI
+		 * counts are powers of 2 up to 32, so 32 will always work here.
+		 */
+		vector = 32;
+		vector_count = msi_desc->nvec_used;
+	} else {
+		vector = hv_msi_get_int_vector(data);
+		vector_count = 1;
+	}
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	init_completion(&comp.comp_pkt.host_event);
+	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
+	ctxt.pci_pkt.compl_ctxt = &comp;
+
+	switch (pci_protocol_version) {
+	case PCI_PROTOCOL_VERSION_1_1:
+		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
+					dest,
+					hpdev->desc.win_slot.slot,
+					vector,
+					vector_count);
+		break;
+
+	case PCI_PROTOCOL_VERSION_1_2:
+		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
+					dest,
+					hpdev->desc.win_slot.slot,
+					vector,
+					vector_count);
+		break;
+
+	default:
+		/* As we only negotiate protocol versions known to this driver,
+		 * this path should never be hit. However, this is not a hot
+		 * path, so we print a message to aid future updates.
+		 */
+		dev_err(&hbus->hdev->device,
+			"Unexpected vPCI protocol, update driver.");
+		goto free_int_desc;
+	}
+
+	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+			       size, (unsigned long)&ctxt.pci_pkt,
+			       VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (ret) {
+		dev_err(&hbus->hdev->device,
+			"Sending request for interrupt failed: 0x%x",
+			comp.comp_pkt.completion_status);
+		goto free_int_desc;
+	}
+
+	/*
+	 * Since this function is called with IRQ locks held, can't
+	 * do normal wait for completion; instead poll.
+	 */
+	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
+		/* 0xFFFF means an invalid PCI VENDOR ID. */
+		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
+			dev_err_once(&hbus->hdev->device,
+				     "the device has gone\n");
+			goto free_int_desc;
+		}
+
+		/*
+		 * When the higher level interrupt code calls us with
+		 * interrupt disabled, we must poll the channel by calling
+		 * the channel callback directly when channel->target_cpu is
+		 * the current CPU. When the higher level interrupt code
+		 * calls us with interrupt enabled, let's add the
+		 * local_irq_save()/restore() to avoid race:
+		 * hv_pci_onchannelcallback() can also run in tasklet.
+		 */
+		local_irq_save(flags);
+
+		if (hbus->hdev->channel->target_cpu == smp_processor_id())
+			hv_pci_onchannelcallback(hbus);
+
+		local_irq_restore(flags);
+
+		if (hpdev->state == hv_pcichild_ejecting) {
+			dev_err_once(&hbus->hdev->device,
+				     "the device is being ejected\n");
+			goto free_int_desc;
+		}
+
+		udelay(100);
+	}
+
+	if (comp.comp_pkt.completion_status < 0) {
+		dev_err(&hbus->hdev->device,
+			"Request for interrupt failed: 0x%x",
+			comp.comp_pkt.completion_status);
+		goto free_int_desc;
+	}
+
+	/*
+	 * Record the assignment so that this can be unwound later. Using
+	 * irq_set_chip_data() here would be appropriate, but the lock it takes
+	 * is already held.
+	 */
+	*int_desc = comp.int_desc;
+	data->chip_data = int_desc;
+
+	/* Pass up the result. */
+	msg->address_hi = comp.int_desc.address >> 32;
+	msg->address_lo = comp.int_desc.address & 0xffffffff;
+	msg->data = comp.int_desc.data;
+
+	put_pcichild(hpdev);
+	return;
+
+free_int_desc:
+	kfree(int_desc);
+drop_reference:
+	put_pcichild(hpdev);
+return_null_message:
+	msg->address_hi = 0;
+	msg->address_lo = 0;
+	msg->data = 0;
+}
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip hv_msi_irq_chip = {
+	.name			= "Hyper-V PCIe MSI",
+	.irq_compose_msi_msg	= hv_compose_msi_msg,
+	.irq_set_affinity	= hv_set_affinity,
+	.irq_ack		= irq_chip_ack_parent,
+	.irq_mask		= hv_irq_mask,
+	.irq_unmask		= hv_irq_unmask,
+};
+
+static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
+						   msi_alloc_info_t *arg)
+{
+	return arg->msi_hwirq;
+}
+
+static struct msi_domain_ops hv_msi_ops = {
+	.get_hwirq	= hv_msi_domain_ops_get_hwirq,
+	.msi_prepare	= hv_msi_prepare,
+	.set_desc	= pci_msi_set_desc,
+	.msi_free	= hv_msi_free,
+};
+
+/**
+ * hv_pcie_init_irq_domain() - Initialize IRQ domain
+ * @hbus:	The root PCI bus
+ *
+ * This function creates an IRQ domain which will be used for
+ * interrupts from devices that have been passed through.  These
+ * devices only support MSI and MSI-X, not line-based interrupts
+ * or simulations of line-based interrupts through PCIe's
+ * fabric-layer messages.  Because interrupts are remapped, we
+ * can support multi-message MSI here.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
+{
+	hbus->msi_info.chip = &hv_msi_irq_chip;
+	hbus->msi_info.ops = &hv_msi_ops;
+	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
+		MSI_FLAG_PCI_MSIX);
+	hbus->msi_info.handler = handle_edge_irq;
+	hbus->msi_info.handler_name = "edge";
+	hbus->msi_info.data = hbus;
+	hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
+						     &hbus->msi_info,
+						     x86_vector_domain);
+	if (!hbus->irq_domain) {
+		dev_err(&hbus->hdev->device,
+			"Failed to build an MSI IRQ domain\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * get_bar_size() - Get the address space consumed by a BAR
+ * @bar_val:	Value that a BAR returned after -1 was written
+ *              to it.
+ *
+ * This function returns the size of the BAR, rounded up to 1
+ * page.  It has to be rounded up because the hypervisor's page
+ * table entry that maps the BAR into the VM can't specify an
+ * offset within a page.  The invariant is that the hypervisor
+ * must place any BAR smaller than a page at the beginning of
+ * a page.
+ *
+ * Return:	Size in bytes of the consumed MMIO space.
+ */
+static u64 get_bar_size(u64 bar_val)
+{
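+	/*
+	 * Example: for a BAR that probed as 0xFFFFFFFFFFFFF000,
+	 * ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK) is 0xFFF, so the size
+	 * works out to 0x1000: one 4 KiB page.
+	 */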
+	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
+			PAGE_SIZE);
+}
+
+/**
+ * survey_child_resources() - Total all MMIO requirements
+ * @hbus:	Root PCI bus, as understood by this driver
+ */
+static void survey_child_resources(struct hv_pcibus_device *hbus)
+{
+	struct hv_pci_dev *hpdev;
+	resource_size_t bar_size = 0;
+	unsigned long flags;
+	struct completion *event;
+	u64 bar_val;
+	int i;
+
+	/* If nobody is waiting on the answer, don't compute it. */
+	event = xchg(&hbus->survey_event, NULL);
+	if (!event)
+		return;
+
+	/* If the answer has already been computed, go with it. */
+	if (hbus->low_mmio_space || hbus->high_mmio_space) {
+		complete(event);
+		return;
+	}
+
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+	/*
+	 * Due to an interesting quirk of the PCI spec, all memory regions
+ * for a child device are a power of 2 in size and naturally aligned,
+	 * so it's sufficient to just add them up without tracking alignment.
+	 */
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+		for (i = 0; i < 6; i++) {
+			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
+				dev_err(&hbus->hdev->device,
+					"There's an I/O BAR in this list!\n");
+
+			if (hpdev->probed_bar[i] != 0) {
+				/*
+				 * A probed BAR has all the upper bits set that
+				 * can be changed.
+				 */
+
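+				/*
+				 * A 64-bit BAR spans two consecutive
+				 * probed_bar[] entries; the pre-increment
+				 * below consumes the high half so the next
+				 * iteration doesn't treat it as a new BAR.
+				 */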
+				bar_val = hpdev->probed_bar[i];
+				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+					bar_val |=
+					((u64)hpdev->probed_bar[++i] << 32);
+				else
+					bar_val |= 0xffffffff00000000ULL;
+
+				bar_size = get_bar_size(bar_val);
+
+				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+					hbus->high_mmio_space += bar_size;
+				else
+					hbus->low_mmio_space += bar_size;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+	complete(event);
+}
+
+/**
+ * prepopulate_bars() - Fill in BARs with defaults
+ * @hbus:	Root PCI bus, as understood by this driver
+ *
+ * The core PCI driver code seems much, much happier if the BARs
+ * for a device have values upon first scan. So fill them in.
+ * The algorithm below works down from large sizes to small,
+ * attempting to pack the assignments optimally. The assumption,
+ * enforced in other parts of the code, is that the beginning of
+ * the memory-mapped I/O space will be aligned on the largest
+ * BAR size.
+ */
+static void prepopulate_bars(struct hv_pcibus_device *hbus)
+{
+	resource_size_t high_size = 0;
+	resource_size_t low_size = 0;
+	resource_size_t high_base = 0;
+	resource_size_t low_base = 0;
+	resource_size_t bar_size;
+	struct hv_pci_dev *hpdev;
+	unsigned long flags;
+	u64 bar_val;
+	u32 command;
+	bool high;
+	int i;
+
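+	/*
+	 * 1ULL << (63 - clz) is the largest power of two not exceeding the
+	 * total MMIO need. Since every BAR is itself a power of two, this
+	 * bounds the largest possible BAR, and the loop below packs BARs
+	 * starting at that size and working down.
+	 */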
+	if (hbus->low_mmio_space) {
+		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
+		low_base = hbus->low_mmio_res->start;
+	}
+
+	if (hbus->high_mmio_space) {
+		high_size = 1ULL <<
+			(63 - __builtin_clzll(hbus->high_mmio_space));
+		high_base = hbus->high_mmio_res->start;
+	}
+
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+	/* Pick addresses for the BARs. */
+	do {
+		list_for_each_entry(hpdev, &hbus->children, list_entry) {
+			for (i = 0; i < 6; i++) {
+				bar_val = hpdev->probed_bar[i];
+				if (bar_val == 0)
+					continue;
+				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
+				if (high) {
+					bar_val |=
+						((u64)hpdev->probed_bar[i + 1]
+						 << 32);
+				} else {
+					bar_val |= 0xffffffffULL << 32;
+				}
+				bar_size = get_bar_size(bar_val);
+				if (high) {
+					if (high_size != bar_size) {
+						i++;
+						continue;
+					}
+					_hv_pcifront_write_config(hpdev,
+						PCI_BASE_ADDRESS_0 + (4 * i),
+						4,
+						(u32)(high_base & 0xffffff00));
+					i++;
+					_hv_pcifront_write_config(hpdev,
+						PCI_BASE_ADDRESS_0 + (4 * i),
+						4, (u32)(high_base >> 32));
+					high_base += bar_size;
+				} else {
+					if (low_size != bar_size)
+						continue;
+					_hv_pcifront_write_config(hpdev,
+						PCI_BASE_ADDRESS_0 + (4 * i),
+						4,
+						(u32)(low_base & 0xffffff00));
+					low_base += bar_size;
+				}
+			}
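+			/*
+			 * Once both pass sizes have shrunk to 1 or less,
+			 * every BAR (each at least a page) has been placed,
+			 * so memory decoding can be enabled.
+			 */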
+			if (high_size <= 1 && low_size <= 1) {
+				/* Set the memory enable bit. */
+				_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
+							 &command);
+				command |= PCI_COMMAND_MEMORY;
+				_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
+							  command);
+				break;
+			}
+		}
+
+		high_size >>= 1;
+		low_size >>= 1;
+	} while (high_size || low_size);
+
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+}
+
+/*
+ * Assign entries in sysfs pci slot directory.
+ *
+ * Note that this function does not need to lock the children list
+ * because it is called from pci_devices_present_work which
+ * is serialized with hv_eject_device_work because they are on the
+ * same ordered workqueue. Therefore hbus->children list will not change
+ * even when pci_create_slot sleeps.
+ */
+static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
+{
+	struct hv_pci_dev *hpdev;
+	char name[SLOT_NAME_SIZE];
+	int slot_nr;
+
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+		if (hpdev->pci_slot)
+			continue;
+
+		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
+		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
+		hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+					  name, NULL);
+		if (IS_ERR(hpdev->pci_slot)) {
+			pr_warn("pci_create slot %s failed\n", name);
+			hpdev->pci_slot = NULL;
+		}
+	}
+}
+
+/*
+ * Remove entries in sysfs pci slot directory.
+ */
+static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
+{
+	struct hv_pci_dev *hpdev;
+
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+		if (!hpdev->pci_slot)
+			continue;
+		pci_destroy_slot(hpdev->pci_slot);
+		hpdev->pci_slot = NULL;
+	}
+}
+
+/**
+ * create_root_hv_pci_bus() - Expose a new root PCI bus
+ * @hbus:	Root PCI bus, as understood by this driver
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
+{
+	/* Register the device */
+	hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
+					    0, /* bus number is always zero */
+					    &hv_pcifront_ops,
+					    &hbus->sysdata,
+					    &hbus->resources_for_children);
+	if (!hbus->pci_bus)
+		return -ENODEV;
+
+	hbus->pci_bus->msi = &hbus->msi_chip;
+	hbus->pci_bus->msi->dev = &hbus->hdev->device;
+
+	pci_lock_rescan_remove();
+	pci_scan_child_bus(hbus->pci_bus);
+	pci_bus_assign_resources(hbus->pci_bus);
+	hv_pci_assign_slots(hbus);
+	pci_bus_add_devices(hbus->pci_bus);
+	pci_unlock_rescan_remove();
+	hbus->state = hv_pcibus_installed;
+	return 0;
+}
+
+struct q_res_req_compl {
+	struct completion host_event;
+	struct hv_pci_dev *hpdev;
+};
+
+/**
+ * q_resource_requirements() - Query Resource Requirements
+ * @context:		The completion context.
+ * @resp:		The response that came from the host.
+ * @resp_packet_size:	The size in bytes of resp.
+ *
+ * This function is invoked on completion of a Query Resource
+ * Requirements packet.
+ */
+static void q_resource_requirements(void *context, struct pci_response *resp,
+				    int resp_packet_size)
+{
+	struct q_res_req_compl *completion = context;
+	struct pci_q_res_req_response *q_res_req =
+		(struct pci_q_res_req_response *)resp;
+	int i;
+
+	if (resp->status < 0) {
+		dev_err(&completion->hpdev->hbus->hdev->device,
+			"query resource requirements failed: %x\n",
+			resp->status);
+	} else {
+		for (i = 0; i < 6; i++) {
+			completion->hpdev->probed_bar[i] =
+				q_res_req->probed_bar[i];
+		}
+	}
+
+	complete(&completion->host_event);
+}
+
+/**
+ * new_pcichild_device() - Create a new child device
+ * @hbus:	The internal struct tracking this root PCI bus.
+ * @desc:	The information supplied so far from the host
+ *              about the device.
+ *
+ * This function creates the tracking structure for a new child
+ * device and kicks off the process of figuring out what it is.
+ *
+ * Return: Pointer to the new tracking struct
+ */
+static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+		struct pci_function_description *desc)
+{
+	struct hv_pci_dev *hpdev;
+	struct pci_child_message *res_req;
+	struct q_res_req_compl comp_pkt;
+	struct {
+		struct pci_packet init_packet;
+		u8 buffer[sizeof(struct pci_child_message)];
+	} pkt;
+	unsigned long flags;
+	int ret;
+
+	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
+	if (!hpdev)
+		return NULL;
+
+	hpdev->hbus = hbus;
+
+	memset(&pkt, 0, sizeof(pkt));
+	init_completion(&comp_pkt.host_event);
+	comp_pkt.hpdev = hpdev;
+	pkt.init_packet.compl_ctxt = &comp_pkt;
+	pkt.init_packet.completion_func = q_resource_requirements;
+	res_req = (struct pci_child_message *)&pkt.init_packet.message;
+	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
+	res_req->wslot.slot = desc->win_slot.slot;
+
+	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
+			       sizeof(struct pci_child_message),
+			       (unsigned long)&pkt.init_packet,
+			       VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (ret)
+		goto error;
+
+	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
+		goto error;
+
+	hpdev->desc = *desc;
+	refcount_set(&hpdev->refs, 1);
+	get_pcichild(hpdev);
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+	list_add_tail(&hpdev->list_entry, &hbus->children);
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+	return hpdev;
+
+error:
+	kfree(hpdev);
+	return NULL;
+}
+
+/**
+ * get_pcichild_wslot() - Find device from slot
+ * @hbus:	Root PCI bus, as understood by this driver
+ * @wslot:	Location on the bus
+ *
+ * This function looks up a PCI device and returns the internal
+ * representation of it.  It acquires a reference on it, so that
+ * the device won't be deleted while somebody is using it.  The
+ * caller is responsible for calling put_pcichild() to release
+ * this reference.
+ *
+ * Return:	Internal representation of a PCI device
+ */
+static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+					     u32 wslot)
+{
+	unsigned long flags;
+	struct hv_pci_dev *iter, *hpdev = NULL;
+
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	list_for_each_entry(iter, &hbus->children, list_entry) {
+		if (iter->desc.win_slot.slot == wslot) {
+			hpdev = iter;
+			get_pcichild(hpdev);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+	return hpdev;
+}
+
+/**
+ * pci_devices_present_work() - Handle new list of child devices
+ * @work:	Work struct embedded in struct hv_dr_work
+ *
+ * "Bus Relations" is the Windows term for "children of this
+ * bus."  The terminology is preserved here for people trying to
+ * debug the interaction between Hyper-V and Linux.  This
+ * function is called when the parent partition reports a list
+ * of functions that should be observed under this PCI Express
+ * port (bus).
+ *
+ * This function updates the list, and must tolerate being
+ * called multiple times with the same information.  The typical
+ * number of child devices is one, with very atypical cases
+ * involving three or four, so the algorithms used here can be
+ * simple and inefficient.
+ *
+ * It must also treat the omission of a previously observed device as
+ * notification that the device no longer exists.
+ *
+ * Note that this function is serialized with hv_eject_device_work(),
+ * because both are pushed to the ordered workqueue hbus->wq.
+ */
+static void pci_devices_present_work(struct work_struct *work)
+{
+	u32 child_no;
+	bool found;
+	struct pci_function_description *new_desc;
+	struct hv_pci_dev *hpdev;
+	struct hv_pcibus_device *hbus;
+	struct list_head removed;
+	struct hv_dr_work *dr_wrk;
+	struct hv_dr_state *dr = NULL;
+	unsigned long flags;
+
+	dr_wrk = container_of(work, struct hv_dr_work, wrk);
+	hbus = dr_wrk->bus;
+	kfree(dr_wrk);
+
+	INIT_LIST_HEAD(&removed);
+
+	/* Pull this off the queue and process it if it was the last one. */
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	while (!list_empty(&hbus->dr_list)) {
+		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
+				      list_entry);
+		list_del(&dr->list_entry);
+
+		/* Throw this away if the list still has stuff in it. */
+		if (!list_empty(&hbus->dr_list)) {
+			kfree(dr);
+			continue;
+		}
+	}
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+	if (!dr) {
+		put_hvpcibus(hbus);
+		return;
+	}
+
+	/* First, mark all existing children as reported missing. */
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	list_for_each_entry(hpdev, &hbus->children, list_entry) {
+		hpdev->reported_missing = true;
+	}
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+	/* Next, add back any reported devices. */
+	for (child_no = 0; child_no < dr->device_count; child_no++) {
+		found = false;
+		new_desc = &dr->func[child_no];
+
+		spin_lock_irqsave(&hbus->device_list_lock, flags);
+		list_for_each_entry(hpdev, &hbus->children, list_entry) {
+			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
+			    (hpdev->desc.v_id == new_desc->v_id) &&
+			    (hpdev->desc.d_id == new_desc->d_id) &&
+			    (hpdev->desc.ser == new_desc->ser)) {
+				hpdev->reported_missing = false;
+				found = true;
+			}
+		}
+		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+		if (!found) {
+			hpdev = new_pcichild_device(hbus, new_desc);
+			if (!hpdev)
+				dev_err(&hbus->hdev->device,
+					"couldn't record a child device.\n");
+		}
+	}
+
+	/* Move missing children to a list on the stack. */
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	do {
+		found = false;
+		list_for_each_entry(hpdev, &hbus->children, list_entry) {
+			if (hpdev->reported_missing) {
+				found = true;
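+				/*
+				 * Drop one of the two references taken in
+				 * new_pcichild_device(); the second is
+				 * dropped below, once the child has been
+				 * removed from the list.
+				 */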
+				put_pcichild(hpdev);
+				list_move_tail(&hpdev->list_entry, &removed);
+				break;
+			}
+		}
+	} while (found);
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+	/* Delete everything that should no longer exist. */
+	while (!list_empty(&removed)) {
+		hpdev = list_first_entry(&removed, struct hv_pci_dev,
+					 list_entry);
+		list_del(&hpdev->list_entry);
+
+		if (hpdev->pci_slot)
+			pci_destroy_slot(hpdev->pci_slot);
+
+		put_pcichild(hpdev);
+	}
+
+	switch (hbus->state) {
+	case hv_pcibus_installed:
+		/*
+		 * Tell the core to rescan the bus
+		 * because there may have been changes.
+		 */
+		pci_lock_rescan_remove();
+		pci_scan_child_bus(hbus->pci_bus);
+		hv_pci_assign_slots(hbus);
+		pci_unlock_rescan_remove();
+		break;
+
+	case hv_pcibus_init:
+	case hv_pcibus_probed:
+		survey_child_resources(hbus);
+		break;
+
+	default:
+		break;
+	}
+
+	put_hvpcibus(hbus);
+	kfree(dr);
+}
+
+/**
+ * hv_pci_devices_present() - Handles list of new children
+ * @hbus:	Root PCI bus, as understood by this driver
+ * @relations:	Packet from host listing children
+ *
+ * This function is invoked whenever a new list of devices for
+ * this bus appears.
+ */
+static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+				   struct pci_bus_relations *relations)
+{
+	struct hv_dr_state *dr;
+	struct hv_dr_work *dr_wrk;
+	unsigned long flags;
+	bool pending_dr;
+
+	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
+	if (!dr_wrk)
+		return;
+
+	dr = kzalloc(offsetof(struct hv_dr_state, func) +
+		     (sizeof(struct pci_function_description) *
+		      (relations->device_count)), GFP_NOWAIT);
+	if (!dr)  {
+		kfree(dr_wrk);
+		return;
+	}
+
+	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
+	dr_wrk->bus = hbus;
+	dr->device_count = relations->device_count;
+	if (dr->device_count != 0) {
+		memcpy(dr->func, relations->func,
+		       sizeof(struct pci_function_description) *
+		       dr->device_count);
+	}
+
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	/*
+	 * If pending_dr is true, we have already queued a work,
+	 * which will see the new dr. Otherwise, we need to
+	 * queue a new work.
+	 */
+	pending_dr = !list_empty(&hbus->dr_list);
+	list_add_tail(&dr->list_entry, &hbus->dr_list);
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+	if (pending_dr) {
+		kfree(dr_wrk);
+	} else {
+		get_hvpcibus(hbus);
+		queue_work(hbus->wq, &dr_wrk->wrk);
+	}
+}
+
+/**
+ * hv_eject_device_work() - Asynchronously handles ejection
+ * @work:	Work struct embedded in internal device struct
+ *
+ * This function handles ejecting a device.  Windows will
+ * attempt to gracefully eject a device, waiting 60 seconds to
+ * hear back from the guest OS that this completed successfully.
+ * If this timer expires, the device will be forcibly removed.
+ */
+static void hv_eject_device_work(struct work_struct *work)
+{
+	struct pci_eject_response *ejct_pkt;
+	struct hv_pcibus_device *hbus;
+	struct hv_pci_dev *hpdev;
+	struct pci_dev *pdev;
+	unsigned long flags;
+	int wslot;
+	struct {
+		struct pci_packet pkt;
+		u8 buffer[sizeof(struct pci_eject_response)];
+	} ctxt;
+
+	hpdev = container_of(work, struct hv_pci_dev, wrk);
+	hbus = hpdev->hbus;
+
+	WARN_ON(hpdev->state != hv_pcichild_ejecting);
+
+	/*
+	 * Ejection can come before or after the PCI bus has been set up, so
+	 * attempt to find it and tear down the bus state, if it exists.  This
+	 * must be done without constructs like pci_domain_nr(hbus->pci_bus)
+	 * because hbus->pci_bus may not exist yet.
+	 */
+	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
+	pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
+	if (pdev) {
+		pci_lock_rescan_remove();
+		pci_stop_and_remove_bus_device(pdev);
+		pci_dev_put(pdev);
+		pci_unlock_rescan_remove();
+	}
+
+	spin_lock_irqsave(&hbus->device_list_lock, flags);
+	list_del(&hpdev->list_entry);
+	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+	if (hpdev->pci_slot)
+		pci_destroy_slot(hpdev->pci_slot);
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
+	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
+			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+			 VM_PKT_DATA_INBAND, 0);
+
+	/* For the get_pcichild() in hv_pci_eject_device() */
+	put_pcichild(hpdev);
+	/* For the two refs got in new_pcichild_device() */
+	put_pcichild(hpdev);
+	put_pcichild(hpdev);
+	/* hpdev has been freed. Do not use it any more. */
+
+	put_hvpcibus(hbus);
+}
+
+/**
+ * hv_pci_eject_device() - Handles device ejection
+ * @hpdev:	Internal device tracking struct
+ *
+ * This function is invoked when an ejection packet arrives.  It
+ * just schedules work so that we don't re-enter the packet
+ * delivery code handling the ejection.
+ */
+static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
+{
+	hpdev->state = hv_pcichild_ejecting;
+	get_pcichild(hpdev);
+	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
+	get_hvpcibus(hpdev->hbus);
+	queue_work(hpdev->hbus->wq, &hpdev->wrk);
+}
+
+/**
+ * hv_pci_onchannelcallback() - Handles incoming packets
+ * @context:	Internal bus tracking struct
+ *
+ * This function is invoked whenever the host sends a packet to
+ * this channel (which is private to this root PCI bus).
+ */
+static void hv_pci_onchannelcallback(void *context)
+{
+	const int packet_size = 0x100;
+	int ret;
+	struct hv_pcibus_device *hbus = context;
+	u32 bytes_recvd;
+	u64 req_id;
+	struct vmpacket_descriptor *desc;
+	unsigned char *buffer;
+	int bufferlen = packet_size;
+	struct pci_packet *comp_packet;
+	struct pci_response *response;
+	struct pci_incoming_message *new_message;
+	struct pci_bus_relations *bus_rel;
+	struct pci_dev_inval_block *inval;
+	struct pci_dev_incoming *dev_message;
+	struct hv_pci_dev *hpdev;
+
+	buffer = kmalloc(bufferlen, GFP_ATOMIC);
+	if (!buffer)
+		return;
+
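+	/*
+	 * Drain the channel: on -ENOBUFS, vmbus_recvpacket_raw() reports
+	 * the required size in bytes_recvd, so the buffer is grown and the
+	 * read retried; a zero-length read means the ring is empty.
+	 */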
+	while (1) {
+		ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
+					   bufferlen, &bytes_recvd, &req_id);
+
+		if (ret == -ENOBUFS) {
+			kfree(buffer);
+			/* Handle large packet */
+			bufferlen = bytes_recvd;
+			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+			if (!buffer)
+				return;
+			continue;
+		}
+
+		/* Zero length indicates there are no more packets. */
+		if (ret || !bytes_recvd)
+			break;
+
+		/*
+		 * All incoming packets must be at least as large as a
+		 * response.
+		 */
+		if (bytes_recvd <= sizeof(struct pci_response))
+			continue;
+		desc = (struct vmpacket_descriptor *)buffer;
+
+		switch (desc->type) {
+		case VM_PKT_COMP:
+
+			/*
+			 * The host is trusted, and thus it's safe to interpret
+			 * this transaction ID as a pointer.
+			 */
+			comp_packet = (struct pci_packet *)req_id;
+			response = (struct pci_response *)buffer;
+			comp_packet->completion_func(comp_packet->compl_ctxt,
+						     response,
+						     bytes_recvd);
+			break;
+
+		case VM_PKT_DATA_INBAND:
+
+			new_message = (struct pci_incoming_message *)buffer;
+			switch (new_message->message_type.type) {
+			case PCI_BUS_RELATIONS:
+
+				bus_rel = (struct pci_bus_relations *)buffer;
+				if (bytes_recvd <
+				    offsetof(struct pci_bus_relations, func) +
+				    (sizeof(struct pci_function_description) *
+				     (bus_rel->device_count))) {
+					dev_err(&hbus->hdev->device,
+						"bus relations too small\n");
+					break;
+				}
+
+				hv_pci_devices_present(hbus, bus_rel);
+				break;
+
+			case PCI_EJECT:
+
+				dev_message = (struct pci_dev_incoming *)buffer;
+				hpdev = get_pcichild_wslot(hbus,
+						      dev_message->wslot.slot);
+				if (hpdev) {
+					hv_pci_eject_device(hpdev);
+					put_pcichild(hpdev);
+				}
+				break;
+
+			case PCI_INVALIDATE_BLOCK:
+
+				inval = (struct pci_dev_inval_block *)buffer;
+				hpdev = get_pcichild_wslot(hbus,
+							   inval->wslot.slot);
+				if (hpdev) {
+					if (hpdev->block_invalidate) {
+						hpdev->block_invalidate(
+						    hpdev->invalidate_context,
+						    inval->block_mask);
+					}
+					put_pcichild(hpdev);
+				}
+				break;
+
+			default:
+				dev_warn(&hbus->hdev->device,
+					"Unimplemented protocol message %x\n",
+					new_message->message_type.type);
+				break;
+			}
+			break;
+
+		default:
+			dev_err(&hbus->hdev->device,
+				"unhandled packet type %d, tid %llx len %d\n",
+				desc->type, req_id, bytes_recvd);
+			break;
+		}
+	}
+
+	kfree(buffer);
+}
+
+/**
+ * hv_pci_protocol_negotiation() - Set up protocol
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * This driver is intended to support running on Windows 10
+ * (server) and later versions. It will not run on earlier
+ * versions, as they assume that many of the operations which
+ * Linux needs to perform with a spinlock held are done through
+ * asynchronous messaging over VMBus.  Windows 10 increases the
+ * surface area of PCI emulation so that these actions can take
+ * place by suspending a virtual processor for their duration.
+ *
+ * This function negotiates the channel protocol version,
+ * failing if the host doesn't support the necessary protocol
+ * level.
+ */
+static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+{
+	struct pci_version_request *version_req;
+	struct hv_pci_compl comp_pkt;
+	struct pci_packet *pkt;
+	int ret;
+	int i;
+
+	/*
+	 * Initiate the handshake with the host and negotiate
+	 * a version that the host can support. We start with the
+	 * highest version number and go down if the host cannot
+	 * support it.
+	 */
+	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	init_completion(&comp_pkt.host_event);
+	pkt->completion_func = hv_pci_generic_compl;
+	pkt->compl_ctxt = &comp_pkt;
+	version_req = (struct pci_version_request *)&pkt->message;
+	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
+
+	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
+		version_req->protocol_version = pci_protocol_versions[i];
+		ret = vmbus_sendpacket(hdev->channel, version_req,
+				sizeof(struct pci_version_request),
+				(unsigned long)pkt, VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (!ret)
+			ret = wait_for_response(hdev, &comp_pkt.host_event);
+
+		if (ret) {
+			dev_err(&hdev->device,
+				"PCI Pass-through VSP failed to request version: %d",
+				ret);
+			goto exit;
+		}
+
+		if (comp_pkt.completion_status >= 0) {
+			pci_protocol_version = pci_protocol_versions[i];
+			dev_info(&hdev->device,
+				"PCI VMBus probing: Using version %#x\n",
+				pci_protocol_version);
+			goto exit;
+		}
+
+		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
+			dev_err(&hdev->device,
+				"PCI Pass-through VSP failed version request: %#x",
+				comp_pkt.completion_status);
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		reinit_completion(&comp_pkt.host_event);
+	}
+
+	dev_err(&hdev->device,
+		"PCI pass-through VSP failed to find supported version");
+	ret = -EPROTO;
+
+exit:
+	kfree(pkt);
+	return ret;
+}
+
+/**
+ * hv_pci_free_bridge_windows() - Release memory regions for the
+ * bus
+ * @hbus:	Root PCI bus, as understood by this driver
+ */
+static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
+{
+	/*
+	 * Set the resources back to the way they looked when they
+	 * were allocated by setting IORESOURCE_BUSY again.
+	 */
+
+	if (hbus->low_mmio_space && hbus->low_mmio_res) {
+		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
+		vmbus_free_mmio(hbus->low_mmio_res->start,
+				resource_size(hbus->low_mmio_res));
+	}
+
+	if (hbus->high_mmio_space && hbus->high_mmio_res) {
+		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
+		vmbus_free_mmio(hbus->high_mmio_res->start,
+				resource_size(hbus->high_mmio_res));
+	}
+}
+
+/**
+ * hv_pci_allocate_bridge_windows() - Allocate memory regions
+ * for the bus
+ * @hbus:	Root PCI bus, as understood by this driver
+ *
+ * This function calls vmbus_allocate_mmio(), which is itself a
+ * bit of a compromise.  Ideally, we might change the pnp layer
+ * in the kernel such that it comprehends either PCI devices
+ * which are "grandchildren of ACPI," with some intermediate bus
+ * node (in this case, VMBus) or change it such that it
+ * understands VMBus.  The pnp layer, however, has been declared
+ * deprecated and is not subject to change.
+ *
+ * The workaround, implemented here, is to ask VMBus to allocate
+ * MMIO space for this bus.  VMBus itself knows which ranges are
+ * appropriate by looking at its own ACPI objects.  Then, after
+ * these ranges are claimed, they're modified to look like they
+ * would have looked if the ACPI and pnp code had allocated
+ * bridge windows.  These descriptors have to exist in this form
+ * in order to satisfy the code which will get invoked when the
+ * endpoint PCI function driver calls request_mem_region() or
+ * request_mem_region_exclusive().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
+{
+	resource_size_t align;
+	int ret;
+
+	if (hbus->low_mmio_space) {
+		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
+		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
+					  (u64)(u32)0xffffffff,
+					  hbus->low_mmio_space,
+					  align, false);
+		if (ret) {
+			dev_err(&hbus->hdev->device,
+				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
+				hbus->low_mmio_space);
+			return ret;
+		}
+
+		/* Modify this resource to become a bridge window. */
+		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
+		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
+		pci_add_resource(&hbus->resources_for_children,
+				 hbus->low_mmio_res);
+	}
+
+	if (hbus->high_mmio_space) {
+		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
+		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
+					  0x100000000, -1,
+					  hbus->high_mmio_space, align,
+					  false);
+		if (ret) {
+			dev_err(&hbus->hdev->device,
+				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
+				hbus->high_mmio_space);
+			goto release_low_mmio;
+		}
+
+		/* Modify this resource to become a bridge window. */
+		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
+		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
+		pci_add_resource(&hbus->resources_for_children,
+				 hbus->high_mmio_res);
+	}
+
+	return 0;
+
+release_low_mmio:
+	if (hbus->low_mmio_res) {
+		vmbus_free_mmio(hbus->low_mmio_res->start,
+				resource_size(hbus->low_mmio_res));
+	}
+
+	return ret;
+}
+
+/**
+ * hv_allocate_config_window() - Find MMIO space for PCI Config
+ * @hbus:	Root PCI bus, as understood by this driver
+ *
+ * This function claims memory-mapped I/O space for accessing
+ * configuration space for the functions on this bus.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
+{
+	int ret;
+
+	/*
+	 * Set up a region of MMIO space to use for accessing configuration
+	 * space.
+	 */
+	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
+				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
+	if (ret)
+		return ret;
+
+	/*
+	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
+	 * resource claims (those which cannot be overlapped) and the ranges
+	 * which are valid for the children of this bus, which are intended
+	 * to be overlapped by those children.  Set the flag on this claim
+	 * meaning that this region can't be overlapped.
+	 */
+
+	hbus->mem_config->flags |= IORESOURCE_BUSY;
+
+	return 0;
+}
+
+static void hv_free_config_window(struct hv_pcibus_device *hbus)
+{
+	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
+}
+
+/**
+ * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_enter_d0(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+	struct pci_bus_d0_entry *d0_entry;
+	struct hv_pci_compl comp_pkt;
+	struct pci_packet *pkt;
+	int ret;
+
+	/*
+	 * Tell the host that the bus is ready to use, and moved into the
+	 * powered-on state.  This includes telling the host which region
+	 * of memory-mapped I/O space has been chosen for configuration space
+	 * access.
+	 */
+	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	init_completion(&comp_pkt.host_event);
+	pkt->completion_func = hv_pci_generic_compl;
+	pkt->compl_ctxt = &comp_pkt;
+	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
+	d0_entry->mmio_base = hbus->mem_config->start;
+
+	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
+			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (!ret)
+		ret = wait_for_response(hdev, &comp_pkt.host_event);
+
+	if (ret)
+		goto exit;
+
+	if (comp_pkt.completion_status < 0) {
+		dev_err(&hdev->device,
+			"PCI Pass-through VSP failed D0 Entry with status %x\n",
+			comp_pkt.completion_status);
+		ret = -EPROTO;
+		goto exit;
+	}
+
+	ret = 0;
+
+exit:
+	kfree(pkt);
+	return ret;
+}
+
+/**
+ * hv_pci_query_relations() - Ask host to send list of child
+ * devices
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_query_relations(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+	struct pci_message message;
+	struct completion comp;
+	int ret;
+
+	/* Ask the host to send along the list of child devices */
+	init_completion(&comp);
+	if (cmpxchg(&hbus->survey_event, NULL, &comp))
+		return -ENOTEMPTY;
+
+	memset(&message, 0, sizeof(message));
+	message.type = PCI_QUERY_BUS_RELATIONS;
+
+	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
+			       0, VM_PKT_DATA_INBAND, 0);
+	if (!ret)
+		ret = wait_for_response(hdev, &comp);
+
+	/*
+	 * In the case of fast device addition/removal, it's possible that
+	 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
+	 * already got a PCI_BUS_RELATIONS* message from the host and the
+	 * channel callback already scheduled a work to hbus->wq, which can be
+	 * running pci_devices_present_work() -> survey_child_resources() ->
+	 * complete(&hbus->survey_event), even after hv_pci_query_relations()
+	 * exits and the stack variable 'comp' is no longer valid; as a result,
+	 * a hang or a page fault may happen when the complete() calls
+	 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
+	 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
+	 * -ENODEV, there can't be any more work item scheduled to hbus->wq
+	 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
+	 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
+	 * channel->rescind = true.
+	 */
+	flush_workqueue(hbus->wq);
+
+	return ret;
+}
+
+/**
+ * hv_send_resources_allocated() - Report local resource choices
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * The host OS is expecting to be sent a request as a message
+ * which contains all the resources that the device will use.
+ * The response contains those same resources, "translated",
+ * which is to say, the values that the hardware should use
+ * when it delivers an interrupt.  (MMIO resources are
+ * used in local terms.)  This is nice for Windows, and lines up
+ * with the FDO/PDO split, which doesn't exist in Linux.  Linux
+ * is deeply expecting to scan an emulated PCI configuration
+ * space.  So this message is sent here only to drive the state
+ * machine on the host forward.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_send_resources_allocated(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+	struct pci_resources_assigned *res_assigned;
+	struct pci_resources_assigned2 *res_assigned2;
+	struct hv_pci_compl comp_pkt;
+	struct hv_pci_dev *hpdev;
+	struct pci_packet *pkt;
+	size_t size_res;
+	u32 wslot;
+	int ret;
+
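+	/*
+	 * Protocol 1.2 and later use the larger PCI_RESOURCES_ASSIGNED2
+	 * message; pick the payload size matching the negotiated version.
+	 */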
+	size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+			? sizeof(*res_assigned) : sizeof(*res_assigned2);
+
+	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+
+	ret = 0;
+
+	for (wslot = 0; wslot < 256; wslot++) {
+		hpdev = get_pcichild_wslot(hbus, wslot);
+		if (!hpdev)
+			continue;
+
+		memset(pkt, 0, sizeof(*pkt) + size_res);
+		init_completion(&comp_pkt.host_event);
+		pkt->completion_func = hv_pci_generic_compl;
+		pkt->compl_ctxt = &comp_pkt;
+
+		if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+			res_assigned =
+				(struct pci_resources_assigned *)&pkt->message;
+			res_assigned->message_type.type =
+				PCI_RESOURCES_ASSIGNED;
+			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+		} else {
+			res_assigned2 =
+				(struct pci_resources_assigned2 *)&pkt->message;
+			res_assigned2->message_type.type =
+				PCI_RESOURCES_ASSIGNED2;
+			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
+		}
+		put_pcichild(hpdev);
+
+		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+				size_res, (unsigned long)pkt,
+				VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (!ret)
+			ret = wait_for_response(hdev, &comp_pkt.host_event);
+		if (ret)
+			break;
+
+		if (comp_pkt.completion_status < 0) {
+			ret = -EPROTO;
+			dev_err(&hdev->device,
+				"resource allocated returned 0x%x",
+				comp_pkt.completion_status);
+			break;
+		}
+	}
+
+	kfree(pkt);
+	return ret;
+}
+
+/**
+ * hv_send_resources_released() - Report local resources
+ * released
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_send_resources_released(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+	struct pci_child_message pkt;
+	struct hv_pci_dev *hpdev;
+	u32 wslot;
+	int ret;
+
+	for (wslot = 0; wslot < 256; wslot++) {
+		hpdev = get_pcichild_wslot(hbus, wslot);
+		if (!hpdev)
+			continue;
+
+		memset(&pkt, 0, sizeof(pkt));
+		pkt.message_type.type = PCI_RESOURCES_RELEASED;
+		pkt.wslot.slot = hpdev->desc.win_slot.slot;
+
+		put_pcichild(hpdev);
+
+		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
+				       VM_PKT_DATA_INBAND, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
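+/*
+ * remove_lock is a refcount of outstanding users of the bus (queued work
+ * items and the driver itself); the final put completes remove_event so
+ * that hv_pci_remove() can finish tearing the bus down.
+ */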
+static void get_hvpcibus(struct hv_pcibus_device *hbus)
+{
+	refcount_inc(&hbus->remove_lock);
+}
+
+static void put_hvpcibus(struct hv_pcibus_device *hbus)
+{
+	if (refcount_dec_and_test(&hbus->remove_lock))
+		complete(&hbus->remove_event);
+}
+
+#define HVPCI_DOM_MAP_SIZE (64 * 1024)
+static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
+
+/*
+ * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
+ * as invalid for passthrough PCI devices of this driver.
+ */
+#define HVPCI_DOM_INVALID 0
+
+/**
+ * hv_get_dom_num() - Get a valid PCI domain number
+ * @dom: Requested domain number
+ *
+ * Check if the requested PCI domain number is in use, and return another
+ * number if it is.
+ *
+ * Return: domain number on success, HVPCI_DOM_INVALID on failure
+ */
+static u16 hv_get_dom_num(u16 dom)
+{
+	unsigned int i;
+
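+	/* Fast path: grab the requested domain if it is still free. */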
+	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
+		return dom;
+
+	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
+		if (test_and_set_bit(i, hvpci_dom_map) == 0)
+			return i;
+	}
+
+	return HVPCI_DOM_INVALID;
+}
+
+/**
+ * hv_put_dom_num() - Mark the PCI domain number as free
+ * @dom: Domain number to be freed
+ */
+static void hv_put_dom_num(u16 dom)
+{
+	clear_bit(dom, hvpci_dom_map);
+}
+
+/**
+ * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ * @dev_id:	Identifies the device itself
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_probe(struct hv_device *hdev,
+			const struct hv_vmbus_device_id *dev_id)
+{
+	struct hv_pcibus_device *hbus;
+	u16 dom_req, dom;
+	char *name;
+	int ret;
+
+	/*
+	 * hv_pcibus_device contains the hypercall arguments for retargeting in
+	 * hv_irq_unmask(). Those must not cross a page boundary.
+	 */
+	BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+
+	hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
+	if (!hbus)
+		return -ENOMEM;
+	hbus->state = hv_pcibus_init;
+
+	/*
+	 * The PCI bus "domain" is what is called "segment" in ACPI and other
+	 * specs. Pull it from the instance ID, to get something usually
+	 * unique. In rare cases of collision, we will find out another number
+	 * not in use.
+	 *
+	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
+	 * together with this guest driver can guarantee that (1) The only
+	 * domain used by Gen1 VMs for something that looks like a physical
+	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
+	 * (2) There will be no overlap between domains (after fixing possible
+	 * collisions) in the same VM.
+	 */
+	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
+	dom = hv_get_dom_num(dom_req);
+
+	if (dom == HVPCI_DOM_INVALID) {
+		dev_err(&hdev->device,
+			"Unable to use dom# 0x%hx or other numbers", dom_req);
+		ret = -EINVAL;
+		goto free_bus;
+	}
+
+	if (dom != dom_req)
+		dev_info(&hdev->device,
+			 "PCI dom# 0x%hx has collision, using 0x%hx",
+			 dom_req, dom);
+
+	hbus->sysdata.domain = dom;
+
+	hbus->hdev = hdev;
+	refcount_set(&hbus->remove_lock, 1);
+	INIT_LIST_HEAD(&hbus->children);
+	INIT_LIST_HEAD(&hbus->dr_list);
+	INIT_LIST_HEAD(&hbus->resources_for_children);
+	spin_lock_init(&hbus->config_lock);
+	spin_lock_init(&hbus->device_list_lock);
+	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
+	init_completion(&hbus->remove_event);
+	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
+					   hbus->sysdata.domain);
+	if (!hbus->wq) {
+		ret = -ENOMEM;
+		goto free_dom;
+	}
+
+	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+			 hv_pci_onchannelcallback, hbus);
+	if (ret)
+		goto destroy_wq;
+
+	hv_set_drvdata(hdev, hbus);
+
+	ret = hv_pci_protocol_negotiation(hdev);
+	if (ret)
+		goto close;
+
+	ret = hv_allocate_config_window(hbus);
+	if (ret)
+		goto close;
+
+	hbus->cfg_addr = ioremap(hbus->mem_config->start,
+				 PCI_CONFIG_MMIO_LENGTH);
+	if (!hbus->cfg_addr) {
+		dev_err(&hdev->device,
+			"Unable to map a virtual address for config space\n");
+		ret = -ENOMEM;
+		goto free_config;
+	}
+
+	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
+	if (!name) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
+	hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
+	kfree(name);
+	if (!hbus->sysdata.fwnode) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
+	ret = hv_pcie_init_irq_domain(hbus);
+	if (ret)
+		goto free_fwnode;
+
+	ret = hv_pci_query_relations(hdev);
+	if (ret)
+		goto free_irq_domain;
+
+	ret = hv_pci_enter_d0(hdev);
+	if (ret)
+		goto free_irq_domain;
+
+	ret = hv_pci_allocate_bridge_windows(hbus);
+	if (ret)
+		goto free_irq_domain;
+
+	ret = hv_send_resources_allocated(hdev);
+	if (ret)
+		goto free_windows;
+
+	prepopulate_bars(hbus);
+
+	hbus->state = hv_pcibus_probed;
+
+	ret = create_root_hv_pci_bus(hbus);
+	if (ret)
+		goto free_windows;
+
+	return 0;
+
+free_windows:
+	hv_pci_free_bridge_windows(hbus);
+free_irq_domain:
+	irq_domain_remove(hbus->irq_domain);
+free_fwnode:
+	irq_domain_free_fwnode(hbus->sysdata.fwnode);
+unmap:
+	iounmap(hbus->cfg_addr);
+free_config:
+	hv_free_config_window(hbus);
+close:
+	vmbus_close(hdev->channel);
+destroy_wq:
+	destroy_workqueue(hbus->wq);
+free_dom:
+	hv_put_dom_num(hbus->sysdata.domain);
+free_bus:
+	free_page((unsigned long)hbus);
+	return ret;
+}
+
+static void hv_pci_bus_exit(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+	struct {
+		struct pci_packet teardown_packet;
+		u8 buffer[sizeof(struct pci_message)];
+	} pkt;
+	struct pci_bus_relations relations;
+	struct hv_pci_compl comp_pkt;
+	int ret;
+
+	/*
+	 * After the host sends the RESCIND_CHANNEL message, it doesn't
+	 * access the per-channel ringbuffer any longer.
+	 */
+	if (hdev->channel->rescind)
+		return;
+
+	/* Delete any children which might still exist. */
+	memset(&relations, 0, sizeof(relations));
+	hv_pci_devices_present(hbus, &relations);
+
+	ret = hv_send_resources_released(hdev);
+	if (ret)
+		dev_err(&hdev->device,
+			"Couldn't send resources released packet(s)\n");
+
+	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
+	init_completion(&comp_pkt.host_event);
+	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
+	pkt.teardown_packet.compl_ctxt = &comp_pkt;
+	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+
+	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
+			       sizeof(struct pci_message),
+			       (unsigned long)&pkt.teardown_packet,
+			       VM_PKT_DATA_INBAND,
+			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+	if (!ret)
+		wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+}
+
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_remove(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus;
+
+	hbus = hv_get_drvdata(hdev);
+	if (hbus->state == hv_pcibus_installed) {
+		/* Remove the bus from PCI's point of view. */
+		pci_lock_rescan_remove();
+		pci_stop_root_bus(hbus->pci_bus);
+		hv_pci_remove_slots(hbus);
+		pci_remove_root_bus(hbus->pci_bus);
+		pci_unlock_rescan_remove();
+		hbus->state = hv_pcibus_removed;
+	}
+
+	hv_pci_bus_exit(hdev);
+
+	vmbus_close(hdev->channel);
+
+	iounmap(hbus->cfg_addr);
+	hv_free_config_window(hbus);
+	pci_free_resource_list(&hbus->resources_for_children);
+	hv_pci_free_bridge_windows(hbus);
+	irq_domain_remove(hbus->irq_domain);
+	irq_domain_free_fwnode(hbus->sysdata.fwnode);
+	put_hvpcibus(hbus);
+	wait_for_completion(&hbus->remove_event);
+	destroy_workqueue(hbus->wq);
+
+	hv_put_dom_num(hbus->sysdata.domain);
+
+	free_page((unsigned long)hbus);
+	return 0;
+}
+
+static const struct hv_vmbus_device_id hv_pci_id_table[] = {
+	/* PCI Pass-through Class ID */
+	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
+	{ HV_PCIE_GUID, },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
+
+static struct hv_driver hv_pci_drv = {
+	.name		= "hv_pci",
+	.id_table	= hv_pci_id_table,
+	.probe		= hv_pci_probe,
+	.remove		= hv_pci_remove,
+};
+
+static void __exit exit_hv_pci_drv(void)
+{
+	vmbus_driver_unregister(&hv_pci_drv);
+
+	hvpci_block_ops.read_block = NULL;
+	hvpci_block_ops.write_block = NULL;
+	hvpci_block_ops.reg_blk_invalidate = NULL;
+}
+
+static int __init init_hv_pci_drv(void)
+{
+	if (!hv_is_hyperv_initialized())
+		return -ENODEV;
+
+	/* Set the invalid domain number's bit, so it will not be used */
+	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
+
+	/* Initialize PCI block r/w interface */
+	hvpci_block_ops.read_block = hv_read_config_block;
+	hvpci_block_ops.write_block = hv_write_config_block;
+	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
+
+	return vmbus_driver_register(&hv_pci_drv);
+}
+
+module_init(init_hv_pci_drv);
+module_exit(exit_hv_pci_drv);
+
+MODULE_DESCRIPTION("Hyper-V PCI");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pci-mvebu.c b/marvell/linux/drivers/pci/controller/pci-mvebu.c
new file mode 100644
index 0000000..09af970
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-mvebu.c
@@ -0,0 +1,1172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Marvell Armada 370 and Armada XP SoCs
+ *
+ * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/mbus.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+
+#include "../pci.h"
+#include "../pci-bridge-emul.h"
+
+/*
+ * PCIe unit register offsets.
+ */
+#define PCIE_DEV_ID_OFF		0x0000
+#define PCIE_CMD_OFF		0x0004
+#define PCIE_DEV_REV_OFF	0x0008
+#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
+#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
+#define PCIE_CAP_PCIEXP		0x0060
+#define PCIE_HEADER_LOG_4_OFF	0x0128
+#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
+#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
+#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
+#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
+#define PCIE_WIN5_CTRL_OFF	0x1880
+#define PCIE_WIN5_BASE_OFF	0x1884
+#define PCIE_WIN5_REMAP_OFF	0x188c
+#define PCIE_CONF_ADDR_OFF	0x18f8
+#define  PCIE_CONF_ADDR_EN		0x80000000
+#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
+#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
+#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
+#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
+#define  PCIE_CONF_ADDR(bus, devfn, where) \
+	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
+	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
+	 PCIE_CONF_ADDR_EN)
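+/*
+ * For example, bus 1, devfn 0, where 0x10 encodes as
+ * PCIE_CONF_ADDR_EN | PCIE_CONF_BUS(1) | PCIE_CONF_REG(0x10) = 0x80010010.
+ */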
+#define PCIE_CONF_DATA_OFF	0x18fc
+#define PCIE_MASK_OFF		0x1910
+#define  PCIE_MASK_ENABLE_INTS          0x0f000000
+#define PCIE_CTRL_OFF		0x1a00
+#define  PCIE_CTRL_X1_MODE		0x0001
+#define PCIE_STAT_OFF		0x1a04
+#define  PCIE_STAT_BUS                  0xff00
+#define  PCIE_STAT_DEV                  0x1f0000
+#define  PCIE_STAT_LINK_DOWN		BIT(0)
+#define PCIE_RC_RTSTA		0x1a14
+#define PCIE_DEBUG_CTRL         0x1a60
+#define  PCIE_DEBUG_SOFT_RESET		BIT(20)
+
+struct mvebu_pcie_port;
+
+/* Structure representing all PCIe interfaces */
+struct mvebu_pcie {
+	struct platform_device *pdev;
+	struct mvebu_pcie_port *ports;
+	struct msi_controller *msi;
+	struct list_head resources;
+	struct resource io;
+	struct resource realio;
+	struct resource mem;
+	struct resource busn;
+	int nports;
+};
+
+struct mvebu_pcie_window {
+	phys_addr_t base;
+	phys_addr_t remap;
+	size_t size;
+};
+
+/* Structure representing one PCIe interface */
+struct mvebu_pcie_port {
+	char *name;
+	void __iomem *base;
+	u32 port;
+	u32 lane;
+	int devfn;
+	unsigned int mem_target;
+	unsigned int mem_attr;
+	unsigned int io_target;
+	unsigned int io_attr;
+	struct clk *clk;
+	struct gpio_desc *reset_gpio;
+	char *reset_name;
+	struct pci_bridge_emul bridge;
+	struct device_node *dn;
+	struct mvebu_pcie *pcie;
+	struct mvebu_pcie_window memwin;
+	struct mvebu_pcie_window iowin;
+	u32 saved_pcie_stat;
+	struct resource regs;
+};
+
+static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
+{
+	writel(val, port->base + reg);
+}
+
+static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
+{
+	return readl(port->base + reg);
+}
+
+static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
+{
+	return port->io_target != -1 && port->io_attr != -1;
+}
+
+static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
+{
+	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+}
+
+static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
+{
+	u32 stat;
+
+	stat = mvebu_readl(port, PCIE_STAT_OFF);
+	stat &= ~PCIE_STAT_BUS;
+	stat |= nr << 8;
+	mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
+{
+	u32 stat;
+
+	stat = mvebu_readl(port, PCIE_STAT_OFF);
+	stat &= ~PCIE_STAT_DEV;
+	stat |= nr << 16;
+	mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+/*
+ * Set up PCIe BARs and address decode windows:
+ * BAR[0] -> internal registers (needed for MSI)
+ * BAR[1] -> covers all DRAM banks
+ * BAR[2] -> Disabled
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+	const struct mbus_dram_target_info *dram;
+	u32 size;
+	int i;
+
+	dram = mv_mbus_dram_info();
+
+	/* First, disable and clear BARs and windows. */
+	for (i = 1; i < 3; i++) {
+		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
+		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
+		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
+	}
+
+	for (i = 0; i < 5; i++) {
+		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
+		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
+		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+	}
+
+	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
+	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
+	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
+
+	/* Set up windows for DDR banks.  Count total DDR size on the fly. */
+	size = 0;
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		mvebu_writel(port, cs->base & 0xffff0000,
+			     PCIE_WIN04_BASE_OFF(i));
+		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+		mvebu_writel(port,
+			     ((cs->size - 1) & 0xffff0000) |
+			     (cs->mbus_attr << 8) |
+			     (dram->mbus_dram_target_id << 4) | 1,
+			     PCIE_WIN04_CTRL_OFF(i));
+
+		size += cs->size;
+	}
+
+	/* Round up 'size' to the nearest power of two. */
+	if ((size & (size - 1)) != 0)
+		size = 1 << fls(size);
+
+	/* Set up BAR[1] to cover all DRAM banks. */
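+	/*
+	 * As with the decode windows above, bits [31:16] of the BAR control
+	 * register hold the (size - 1) mask and bit 0 is the enable bit.
+	 */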
+	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
+	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
+	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
+		     PCIE_BAR_CTRL_OFF(1));
+
+	/* Point BAR[0] to the device's internal registers. */
+	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
+	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
+}
+
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+{
+	u32 cmd, mask;
+
+	/* Point PCIe unit MBUS decode windows to DRAM space. */
+	mvebu_pcie_setup_wins(port);
+
+	/* Master + slave enable. */
+	cmd = mvebu_readl(port, PCIE_CMD_OFF);
+	cmd |= PCI_COMMAND_IO;
+	cmd |= PCI_COMMAND_MEMORY;
+	cmd |= PCI_COMMAND_MASTER;
+	mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+	/* Enable interrupt lines A-D. */
+	mask = mvebu_readl(port, PCIE_MASK_OFF);
+	mask |= PCIE_MASK_ENABLE_INTS;
+	mvebu_writel(port, mask, PCIE_MASK_OFF);
+}
+
+static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
+				 struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 *val)
+{
+	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
+
+	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+		     PCIE_CONF_ADDR_OFF);
+
+	switch (size) {
+	case 1:
+		*val = readb_relaxed(conf_data + (where & 3));
+		break;
+	case 2:
+		*val = readw_relaxed(conf_data + (where & 2));
+		break;
+	case 4:
+		*val = readl_relaxed(conf_data);
+		break;
+	default:
+		*val = 0xffffffff;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
+				 struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 val)
+{
+	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
+
+	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+		     PCIE_CONF_ADDR_OFF);
+
+	switch (size) {
+	case 1:
+		writeb(val, conf_data + (where & 3));
+		break;
+	case 2:
+		writew(val, conf_data + (where & 2));
+		break;
+	case 4:
+		writel(val, conf_data);
+		break;
+	default:
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Remove windows, starting with the largest and ending with the
+ * smallest.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+				   phys_addr_t base, size_t size)
+{
+	while (size) {
+		size_t sz = 1 << (fls(size) - 1);
+
+		mvebu_mbus_del_window(base, sz);
+		base += sz;
+		size -= sz;
+	}
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas, each having a power of two size. We start from the largest
+ * one (i.e. the highest order bit set in the size).
+ */
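+/*
+ * For example, a 192 KiB (0x30000) region is covered by a 128 KiB window
+ * followed by a 64 KiB window.
+ */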
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+				   unsigned int target, unsigned int attribute,
+				   phys_addr_t base, size_t size,
+				   phys_addr_t remap)
+{
+	size_t size_mapped = 0;
+
+	while (size) {
+		size_t sz = 1 << (fls(size) - 1);
+		int ret;
+
+		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+							sz, remap);
+		if (ret) {
+			phys_addr_t end = base + sz - 1;
+
+			dev_err(&port->pcie->pdev->dev,
+				"Could not create MBus window at [mem %pa-%pa]: %d\n",
+				&base, &end, ret);
+			mvebu_pcie_del_windows(port, base - size_mapped,
+					       size_mapped);
+			return;
+		}
+
+		size -= sz;
+		size_mapped += sz;
+		base += sz;
+		if (remap != MVEBU_MBUS_NO_REMAP)
+			remap += sz;
+	}
+}
+
+static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+				  unsigned int target, unsigned int attribute,
+				  const struct mvebu_pcie_window *desired,
+				  struct mvebu_pcie_window *cur)
+{
+	if (desired->base == cur->base && desired->remap == cur->remap &&
+	    desired->size == cur->size)
+		return;
+
+	if (cur->size != 0) {
+		mvebu_pcie_del_windows(port, cur->base, cur->size);
+		cur->size = 0;
+		cur->base = 0;
+
+		/*
+		 * If something tries to change the window while it is
+		 * enabled, the change will not be done atomically. That
+		 * would be difficult to do in the general case.
+		 */
+	}
+
+	if (desired->size == 0)
+		return;
+
+	mvebu_pcie_add_windows(port, target, attribute, desired->base,
+			       desired->size, desired->remap);
+	*cur = *desired;
+}
+
+static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+{
+	struct mvebu_pcie_window desired = {};
+	struct pci_bridge_emul_conf *conf = &port->bridge.conf;
+
+	/* Are the new iobase/iolimit values invalid? */
+	if (conf->iolimit < conf->iobase ||
+	    conf->iolimitupper < conf->iobaseupper ||
+	    !(conf->command & PCI_COMMAND_IO)) {
+		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+				      &desired, &port->iowin);
+		return;
+	}
+
+	if (!mvebu_has_ioport(port)) {
+		dev_WARN(&port->pcie->pdev->dev,
+			 "Attempt to set IO when IO is disabled\n");
+		return;
+	}
+
+	/*
+	 * We read the PCI-to-PCI bridge emulated registers, and
+	 * calculate the base address and size of the address decoding
+	 * window to setup, according to the PCI-to-PCI bridge
+	 * specifications. iobase is the bus address, port->iowin.base
+	 * is the CPU address.
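+	 * For example (illustrative values): iobase = 0x41 and
+	 * iobaseupper = 0 decode to bus address 0x4000; with
+	 * iolimit = 0x41 the window spans 0x4000-0x4FFF, i.e. 4 KiB.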
+	 */
+	desired.remap = ((conf->iobase & 0xF0) << 8) |
+			(conf->iobaseupper << 16);
+	desired.base = port->pcie->io.start + desired.remap;
+	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
+			 (conf->iolimitupper << 16)) -
+			desired.remap) +
+		       1;
+
+	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+			      &port->iowin);
+}
+
+static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+{
+	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+	struct pci_bridge_emul_conf *conf = &port->bridge.conf;
+
+	/* Are the new membase/memlimit values invalid? */
+	if (conf->memlimit < conf->membase ||
+	    !(conf->command & PCI_COMMAND_MEMORY)) {
+		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+				      &desired, &port->memwin);
+		return;
+	}
+
+	/*
+	 * We read the PCI-to-PCI bridge emulated registers, and
+	 * calculate the base address and size of the address decoding
+	 * window to setup, according to the PCI-to-PCI bridge
+	 * specifications.
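+	 * For example (illustrative values): membase = 0xE800 and
+	 * memlimit = 0xEFF0 decode to a window spanning
+	 * 0xE8000000-0xEFFFFFFF, i.e. 128 MiB.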
+	 */
+	desired.base = ((conf->membase & 0xFFF0) << 16);
+	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+		       desired.base + 1;
+
+	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+			      &port->memwin);
+}
+
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+				     int reg, u32 *value)
+{
+	struct mvebu_pcie_port *port = bridge->data;
+
+	switch (reg) {
+	case PCI_EXP_DEVCAP:
+		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
+		break;
+
+	case PCI_EXP_DEVCTL:
+		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
+				 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+				   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+		break;
+
+	case PCI_EXP_LNKCAP:
+		/*
+		 * PCIe requires the clock power management capability to be
+		 * hard-wired to zero for downstream ports
+		 */
+		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
+			 ~PCI_EXP_LNKCAP_CLKPM;
+		break;
+
+	case PCI_EXP_LNKCTL:
+		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+		break;
+
+	case PCI_EXP_SLTCTL:
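+		/*
+		 * The emulated Slot Status sits in the upper 16 bits of
+		 * this 32-bit read; always report the device as present.
+		 */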
+		*value = PCI_EXP_SLTSTA_PDS << 16;
+		break;
+
+	case PCI_EXP_RTSTA:
+		*value = mvebu_readl(port, PCIE_RC_RTSTA);
+		break;
+
+	default:
+		return PCI_BRIDGE_EMUL_NOT_HANDLED;
+	}
+
+	return PCI_BRIDGE_EMUL_HANDLED;
+}
+
+static void
+mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+				      int reg, u32 old, u32 new, u32 mask)
+{
+	struct mvebu_pcie_port *port = bridge->data;
+	struct pci_bridge_emul_conf *conf = &bridge->conf;
+
+	switch (reg) {
+	case PCI_COMMAND:
+	{
+		if (!mvebu_has_ioport(port))
+			conf->command &= ~PCI_COMMAND_IO;
+
+		if ((old ^ new) & PCI_COMMAND_IO)
+			mvebu_pcie_handle_iobase_change(port);
+		if ((old ^ new) & PCI_COMMAND_MEMORY)
+			mvebu_pcie_handle_membase_change(port);
+
+		break;
+	}
+
+	case PCI_IO_BASE:
+		/*
+		 * We keep bit 1 set: it is a read-only bit that
+		 * indicates we support 32-bit addressing for the
+		 * I/O window.
+		 */
+		conf->iobase |= PCI_IO_RANGE_TYPE_32;
+		conf->iolimit |= PCI_IO_RANGE_TYPE_32;
+		mvebu_pcie_handle_iobase_change(port);
+		break;
+
+	case PCI_MEMORY_BASE:
+		mvebu_pcie_handle_membase_change(port);
+		break;
+
+	case PCI_IO_BASE_UPPER16:
+		mvebu_pcie_handle_iobase_change(port);
+		break;
+
+	case PCI_PRIMARY_BUS:
+		mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void
+mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+				      int reg, u32 old, u32 new, u32 mask)
+{
+	struct mvebu_pcie_port *port = bridge->data;
+
+	switch (reg) {
+	case PCI_EXP_DEVCTL:
+		/*
+		 * The Armada 370 datasheet says these bits must always
+		 * be zero when in root complex mode.
+		 */
+		new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+			 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+
+		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+		break;
+
+	case PCI_EXP_LNKCTL:
+		/*
+		 * If we don't support CLKREQ, we must ensure that the
+		 * CLKREQ enable bit always reads zero.  Since we haven't
+		 * had this capability, and it's dependent on board wiring,
+		 * disable it for the time being.
+		 */
+		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+
+		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+		break;
+
+	case PCI_EXP_RTSTA:
+		mvebu_writel(port, new, PCIE_RC_RTSTA);
+		break;
+	}
+}
+
+static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+	.write_base = mvebu_pci_bridge_emul_base_conf_write,
+	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
+	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+{
+	struct pci_bridge_emul *bridge = &port->bridge;
+	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
+	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);
+
+	bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
+	bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+	bridge->conf.class_revision =
+		mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+
+	if (mvebu_has_ioport(port)) {
+		/* We support 32-bit I/O addressing */
+		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+	}
+
+	/*
+	 * Older mvebu hardware provides the PCIe Capability structure
+	 * only in version 1. Newer hardware provides it in version 2.
+	 */
+	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);
+
+	bridge->has_pcie = true;
+	bridge->data = port;
+	bridge->ops = &mvebu_pci_bridge_emul_ops;
+
+	pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
+}
+
+static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+						    struct pci_bus *bus,
+						    int devfn)
+{
+	int i;
+
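+	/*
+	 * On bus 0 each port appears as its emulated PCI-to-PCI bridge
+	 * at a fixed devfn; behind the bridge, the matching port is the
+	 * one whose secondary..subordinate bus range contains the bus.
+	 */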
+	for (i = 0; i < pcie->nports; i++) {
+		struct mvebu_pcie_port *port = &pcie->ports[i];
+
+		if (bus->number == 0 && port->devfn == devfn)
+			return port;
+		if (bus->number != 0 &&
+		    bus->number >= port->bridge.conf.secondary_bus &&
+		    bus->number <= port->bridge.conf.subordinate_bus)
+			return port;
+	}
+
+	return NULL;
+}
+
+/* PCI configuration space write function */
+static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			      int where, int size, u32 val)
+{
+	struct mvebu_pcie *pcie = bus->sysdata;
+	struct mvebu_pcie_port *port;
+	int ret;
+
+	port = mvebu_pcie_find_port(pcie, bus, devfn);
+	if (!port)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Access the emulated PCI-to-PCI bridge */
+	if (bus->number == 0)
+		return pci_bridge_emul_conf_write(&port->bridge, where,
+						  size, val);
+
+	if (!mvebu_pcie_link_up(port))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Access the real PCIe interface */
+	ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
+				    where, size, val);
+
+	return ret;
+}
+
+/* PCI configuration space read function */
+static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			      int size, u32 *val)
+{
+	struct mvebu_pcie *pcie = bus->sysdata;
+	struct mvebu_pcie_port *port;
+	int ret;
+
+	port = mvebu_pcie_find_port(pcie, bus, devfn);
+	if (!port) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/* Access the emulated PCI-to-PCI bridge */
+	if (bus->number == 0)
+		return pci_bridge_emul_conf_read(&port->bridge, where,
+						 size, val);
+
+	if (!mvebu_pcie_link_up(port)) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/* Access the real PCIe interface */
+	ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
+				    where, size, val);
+
+	return ret;
+}
+
+static struct pci_ops mvebu_pcie_ops = {
+	.read = mvebu_pcie_rd_conf,
+	.write = mvebu_pcie_wr_conf,
+};
+
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+						 const struct resource *res,
+						 resource_size_t start,
+						 resource_size_t size,
+						 resource_size_t align)
+{
+	if (dev->bus->number != 0)
+		return start;
+
+	/*
+	 * On the PCI-to-PCI bridge side, the I/O windows must have at
+	 * least a 64 KB size and the memory windows must have at
+	 * least a 1 MB size. Moreover, MBus windows need to have a
+	 * base address aligned on their size, and their size must be
+	 * a power of two. This means that if the BAR doesn't have a
+	 * power of two size, several MBus windows will actually be
+	 * created. We need to ensure that the biggest MBus window
+	 * (which will be the first one) is aligned on its size, which
+	 * explains the rounddown_pow_of_two() being done here.
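+	 * For example, a 96 MiB memory BAR is aligned on 64 MiB, the
+	 * size of the first (largest) MBus window created for it.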
+	 */
+	if (res->flags & IORESOURCE_IO)
+		return round_up(start, max_t(resource_size_t, SZ_64K,
+					     rounddown_pow_of_two(size)));
+	else if (res->flags & IORESOURCE_MEM)
+		return round_up(start, max_t(resource_size_t, SZ_1M,
+					     rounddown_pow_of_two(size)));
+	else
+		return start;
+}
+
+static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+					      struct device_node *np,
+					      struct mvebu_pcie_port *port)
+{
+	int ret;
+
+	ret = of_address_to_resource(np, 0, &port->regs);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return devm_ioremap_resource(&pdev->dev, &port->regs);
+}
+
+#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
+#define    DT_TYPE_IO                 0x1
+#define    DT_TYPE_MEM32              0x2
+#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
+#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
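+/*
+ * Example (illustrative): with the MBUS_ID(target, attribute) encoding
+ * used in Marvell device trees, a "ranges" entry carries the MBus
+ * target in bits [63:56] and the attribute in bits [55:48] of the
+ * parent (CPU) address, which is what the two macros above extract.
+ */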
+
+static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+			      unsigned long type,
+			      unsigned int *tgt,
+			      unsigned int *attr)
+{
+	const int na = 3, ns = 2;
+	const __be32 *range;
+	int rlen, nranges, rangesz, pna, i;
+
+	*tgt = -1;
+	*attr = -1;
+
+	range = of_get_property(np, "ranges", &rlen);
+	if (!range)
+		return -EINVAL;
+
+	pna = of_n_addr_cells(np);
+	rangesz = pna + na + ns;
+	nranges = rlen / sizeof(__be32) / rangesz;
+
+	for (i = 0; i < nranges; i++, range += rangesz) {
+		u32 flags = of_read_number(range, 1);
+		u32 slot = of_read_number(range + 1, 1);
+		u64 cpuaddr = of_read_number(range + na, pna);
+		unsigned long rtype;
+
+		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+			rtype = IORESOURCE_IO;
+		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+			rtype = IORESOURCE_MEM;
+		else
+			continue;
+
+		if (slot == PCI_SLOT(devfn) && type == rtype) {
+			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mvebu_pcie_suspend(struct device *dev)
+{
+	struct mvebu_pcie *pcie;
+	int i;
+
+	pcie = dev_get_drvdata(dev);
+	for (i = 0; i < pcie->nports; i++) {
+		struct mvebu_pcie_port *port = pcie->ports + i;
+		port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
+	}
+
+	return 0;
+}
+
+static int mvebu_pcie_resume(struct device *dev)
+{
+	struct mvebu_pcie *pcie;
+	int i;
+
+	pcie = dev_get_drvdata(dev);
+	for (i = 0; i < pcie->nports; i++) {
+		struct mvebu_pcie_port *port = pcie->ports + i;
+		mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
+		mvebu_pcie_setup_hw(port);
+	}
+
+	return 0;
+}
+#endif
+
+static void mvebu_pcie_port_clk_put(void *data)
+{
+	struct mvebu_pcie_port *port = data;
+
+	clk_put(port->clk);
+}
+
+static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
+	struct mvebu_pcie_port *port, struct device_node *child)
+{
+	struct device *dev = &pcie->pdev->dev;
+	enum of_gpio_flags flags;
+	int reset_gpio, ret;
+
+	port->pcie = pcie;
+
+	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
+		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
+			 child);
+		goto skip;
+	}
+
+	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
+		port->lane = 0;
+
+	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
+				    port->lane);
+	if (!port->name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	port->devfn = of_pci_get_devfn(child);
+	if (port->devfn < 0)
+		goto skip;
+
+	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
+				 &port->mem_target, &port->mem_attr);
+	if (ret < 0) {
+		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
+			port->name);
+		goto skip;
+	}
+
+	if (resource_size(&pcie->io) != 0) {
+		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
+				   &port->io_target, &port->io_attr);
+	} else {
+		port->io_target = -1;
+		port->io_attr = -1;
+	}
+
+	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
+	if (reset_gpio == -EPROBE_DEFER) {
+		ret = reset_gpio;
+		goto err;
+	}
+
+	if (gpio_is_valid(reset_gpio)) {
+		unsigned long gpio_flags;
+
+		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
+						  port->name);
+		if (!port->reset_name) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		if (flags & OF_GPIO_ACTIVE_LOW) {
+			dev_info(dev, "%pOF: reset gpio is active low\n",
+				 child);
+			gpio_flags = GPIOF_ACTIVE_LOW |
+				     GPIOF_OUT_INIT_LOW;
+		} else {
+			gpio_flags = GPIOF_OUT_INIT_HIGH;
+		}
+
+		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
+					    port->reset_name);
+		if (ret) {
+			if (ret == -EPROBE_DEFER)
+				goto err;
+			goto skip;
+		}
+
+		port->reset_gpio = gpio_to_desc(reset_gpio);
+	}
+
+	port->clk = of_clk_get_by_name(child, NULL);
+	if (IS_ERR(port->clk)) {
+		dev_err(dev, "%s: cannot get clock\n", port->name);
+		goto skip;
+	}
+
+	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
+	if (ret < 0) {
+		clk_put(port->clk);
+		goto err;
+	}
+
+	return 1;
+
+skip:
+	ret = 0;
+
+	/* In the case of skipping, we need to free these */
+	devm_kfree(dev, port->reset_name);
+	port->reset_name = NULL;
+	devm_kfree(dev, port->name);
+	port->name = NULL;
+
+err:
+	return ret;
+}
+
+/*
+ * Power up a PCIe port.  PCIe requires the refclk to be stable for 100µs
+ * prior to releasing PERST.  See table 2-4 in section 2.6.2 AC Specifications
+ * of the PCI Express Card Electromechanical Specification, 1.1.
+ */
+static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
+{
+	int ret;
+
+	ret = clk_prepare_enable(port->clk);
+	if (ret < 0)
+		return ret;
+
+	if (port->reset_gpio) {
+		u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
+
+		of_property_read_u32(port->dn, "reset-delay-us",
+				     &reset_udelay);
+
+		udelay(100);
+
+		gpiod_set_value_cansleep(port->reset_gpio, 0);
+		msleep(reset_udelay / 1000);
+	}
+
+	return 0;
+}
+
+/*
+ * Power down a PCIe port.  Strictly, PCIe requires us to place the card
+ * in D3hot state before asserting PERST#.
+ */
+static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
+{
+	gpiod_set_value_cansleep(port->reset_gpio, 1);
+
+	clk_disable_unprepare(port->clk);
+}
+
+/*
+ * We can't use devm_of_pci_get_host_bridge_resources() because we
+ * need to parse our special DT properties encoding the MEM and IO
+ * apertures.
+ */
+static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	INIT_LIST_HEAD(&pcie->resources);
+
+	/* Get the bus range */
+	ret = of_pci_parse_bus_range(np, &pcie->busn);
+	if (ret) {
+		dev_err(dev, "failed to parse bus-range property: %d\n", ret);
+		return ret;
+	}
+	pci_add_resource(&pcie->resources, &pcie->busn);
+
+	/* Get the PCIe memory aperture */
+	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
+	if (resource_size(&pcie->mem) == 0) {
+		dev_err(dev, "invalid memory aperture size\n");
+		return -EINVAL;
+	}
+
+	pcie->mem.name = "PCI MEM";
+	pci_add_resource(&pcie->resources, &pcie->mem);
+
+	/* Get the PCIe IO aperture */
+	mvebu_mbus_get_pcie_io_aperture(&pcie->io);
+
+	if (resource_size(&pcie->io) != 0) {
+		pcie->realio.flags = pcie->io.flags;
+		pcie->realio.start = PCIBIOS_MIN_IO;
+		pcie->realio.end = min_t(resource_size_t,
+					 IO_SPACE_LIMIT - SZ_64K,
+					 resource_size(&pcie->io) - 1);
+		pcie->realio.name = "PCI I/O";
+
+		pci_add_resource(&pcie->resources, &pcie->realio);
+	}
+
+	return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+	struct mvebu_pcie *pcie;
+	struct pci_bus *bus, *child;
+	int ret;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0) {
+		dev_err(bridge->dev.parent, "Scanning root bridge failed\n");
+		return ret;
+	}
+
+	pcie = pci_host_bridge_priv(bridge);
+	if (resource_size(&pcie->io) != 0) {
+		unsigned int i;
+
+		for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
+			pci_ioremap_io(i, pcie->io.start + i);
+	}
+
+	bus = bridge->bus;
+
+	/*
+	 * We insert PCI resources into the iomem_resource and
+	 * ioport_resource trees in either pci_bus_claim_resources()
+	 * or pci_bus_assign_resources().
+	 */
+	if (pci_has_flag(PCI_PROBE_ONLY)) {
+		pci_bus_claim_resources(bus);
+	} else {
+		pci_bus_size_bridges(bus);
+		pci_bus_assign_resources(bus);
+
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
+	}
+
+	pci_bus_add_devices(bus);
+	return 0;
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mvebu_pcie *pcie;
+	struct pci_host_bridge *bridge;
+	struct device_node *np = dev->of_node;
+	struct device_node *child;
+	int num, i, ret;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+	pcie->pdev = pdev;
+	platform_set_drvdata(pdev, pcie);
+
+	ret = mvebu_pcie_parse_request_resources(pcie);
+	if (ret)
+		return ret;
+
+	num = of_get_available_child_count(np);
+
+	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
+	if (!pcie->ports)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_available_child_of_node(np, child) {
+		struct mvebu_pcie_port *port = &pcie->ports[i];
+
+		ret = mvebu_pcie_parse_port(pcie, port, child);
+		if (ret < 0) {
+			of_node_put(child);
+			return ret;
+		} else if (ret == 0) {
+			continue;
+		}
+
+		port->dn = child;
+		i++;
+	}
+	pcie->nports = i;
+
+	for (i = 0; i < pcie->nports; i++) {
+		struct mvebu_pcie_port *port = &pcie->ports[i];
+
+		child = port->dn;
+		if (!child)
+			continue;
+
+		ret = mvebu_pcie_powerup(port);
+		if (ret < 0)
+			continue;
+
+		port->base = mvebu_pcie_map_registers(pdev, child, port);
+		if (IS_ERR(port->base)) {
+			dev_err(dev, "%s: cannot map registers\n", port->name);
+			port->base = NULL;
+			mvebu_pcie_powerdown(port);
+			continue;
+		}
+
+		mvebu_pcie_setup_hw(port);
+		mvebu_pcie_set_local_dev_nr(port, 1);
+		mvebu_pci_bridge_emul_init(port);
+	}
+
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = 0;
+	bridge->ops = &mvebu_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+	bridge->align_resource = mvebu_pcie_align_resource;
+	bridge->msi = pcie->msi;
+
+	return mvebu_pci_host_probe(bridge);
+}
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+	{ .compatible = "marvell,armada-xp-pcie", },
+	{ .compatible = "marvell,armada-370-pcie", },
+	{ .compatible = "marvell,dove-pcie", },
+	{ .compatible = "marvell,kirkwood-pcie", },
+	{},
+};
+
+static const struct dev_pm_ops mvebu_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
+};
+
+static struct platform_driver mvebu_pcie_driver = {
+	.driver = {
+		.name = "mvebu-pcie",
+		.of_match_table = mvebu_pcie_of_match_table,
+		/* driver unloading/unbinding currently not supported */
+		.suppress_bind_attrs = true,
+		.pm = &mvebu_pcie_pm_ops,
+	},
+	.probe = mvebu_pcie_probe,
+};
+builtin_platform_driver(mvebu_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/pci-rcar-gen2.c b/marvell/linux/drivers/pci/controller/pci-rcar-gen2.c
new file mode 100644
index 0000000..326171c
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-rcar-gen2.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * Author: Valentine Barshak <valentine.barshak@cogentembedded.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET	0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0		0x0
+#define RCAR_PCIAHB_PREFETCH4		0x1
+#define RCAR_PCIAHB_PREFETCH8		0x2
+#define RCAR_PCIAHB_PREFETCH16		0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM		(3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG		(5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST		(1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE		(1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_SIGTABORT		(1 << 0)
+#define RCAR_PCI_INT_SIGRETABORT	(1 << 1)
+#define RCAR_PCI_INT_REMABORT		(1 << 2)
+#define RCAR_PCI_INT_PERR		(1 << 3)
+#define RCAR_PCI_INT_SIGSERR		(1 << 4)
+#define RCAR_PCI_INT_RESERR		(1 << 5)
+#define RCAR_PCI_INT_WIN1ERR		(1 << 12)
+#define RCAR_PCI_INT_WIN2ERR		(1 << 13)
+#define RCAR_PCI_INT_A			(1 << 16)
+#define RCAR_PCI_INT_B			(1 << 17)
+#define RCAR_PCI_INT_PME		(1 << 19)
+#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT		| \
+				RCAR_PCI_INT_SIGRETABORT	| \
+				RCAR_PCI_INT_REMABORT		| \
+				RCAR_PCI_INT_PERR		| \
+				RCAR_PCI_INT_SIGSERR		| \
+				RCAR_PCI_INT_RESERR		| \
+				RCAR_PCI_INT_WIN1ERR		| \
+				RCAR_PCI_INT_WIN2ERR)
+
+#define RCAR_AHB_BUS_CTR_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS	(1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST	(1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR	(1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ	(1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR	(1 << 17)
+#define RCAR_AHB_BUS_MODE		(RCAR_AHB_BUS_MMODE_HTRANS |	\
+					RCAR_AHB_BUS_MMODE_BYTE_BURST |	\
+					RCAR_AHB_BUS_MMODE_WR_INCR |	\
+					RCAR_AHB_BUS_MMODE_HBUS_REQ |	\
+					RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG			(RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST		(1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK		(1 << 1)
+#define RCAR_USBCTR_PLL_RST		(1 << 2)
+#define RCAR_USBCTR_DIRPD		(1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN	(1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M	(0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M	(1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G	(2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G	(3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK	(3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG	(RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0	(1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1	(1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE	(1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG		(RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+struct rcar_pci_priv {
+	struct device *dev;
+	void __iomem *reg;
+	struct resource mem_res;
+	struct resource *cfg_res;
+	unsigned busnr;
+	int irq;
+	unsigned long window_size;
+	unsigned long window_addr;
+	unsigned long window_pci;
+};
+
+/* PCI configuration space operations */
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+				       int where)
+{
+	struct pci_sys_data *sys = bus->sysdata;
+	struct rcar_pci_priv *priv = sys->private_data;
+	int slot, val;
+
+	if (sys->busnr != bus->number || PCI_FUNC(devfn))
+		return NULL;
+
+	/* Only one EHCI/OHCI device built-in */
+	slot = PCI_SLOT(devfn);
+	if (slot > 2)
+		return NULL;
+
+	/* bridge logic only has registers up to 0x40 */
+	if (slot == 0x0 && where >= 0x40)
+		return NULL;
+
+	val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+		     RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
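+	/*
+	 * Slot 0 is the AHB-PCI bridge itself (host window); slots 1
+	 * and 2 are the built-in OHCI/EHCI controllers, whose config
+	 * spaces sit 0x100 apart (hence (slot >> 1) * 0x100 below).
+	 */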
+	iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
+	return priv->reg + (slot >> 1) * 0x100 + where;
+}
+
+/* PCI interrupt mapping */
+static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct pci_sys_data *sys = dev->bus->sysdata;
+	struct rcar_pci_priv *priv = sys->private_data;
+	int irq;
+
+	irq = of_irq_parse_and_map_pci(dev, slot, pin);
+	if (!irq)
+		irq = priv->irq;
+
+	return irq;
+}
+
+#ifdef CONFIG_PCI_DEBUG
+/* If debug is enabled, attach an error handler irq to the bridge */
+
+static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
+{
+	struct rcar_pci_priv *priv = pw;
+	struct device *dev = priv->dev;
+	u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
+
+	if (status & RCAR_PCI_INT_ALLERRORS) {
+		dev_err(dev, "error irq: status %08x\n", status);
+
+		/* clear the error(s) */
+		iowrite32(status & RCAR_PCI_INT_ALLERRORS,
+			  priv->reg + RCAR_PCI_INT_STATUS_REG);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+{
+	struct device *dev = priv->dev;
+	int ret;
+	u32 val;
+
+	ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
+			       IRQF_SHARED, "error irq", priv);
+	if (ret) {
+		dev_err(dev, "cannot claim IRQ for error handling\n");
+		return;
+	}
+
+	val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
+	val |= RCAR_PCI_INT_ALLERRORS;
+	iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
+}
+#else
+static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+#endif
+
+/* PCI host controller setup */
+static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
+{
+	struct rcar_pci_priv *priv = sys->private_data;
+	struct device *dev = priv->dev;
+	void __iomem *reg = priv->reg;
+	u32 val;
+	int ret;
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+	dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
+
+	/* Disable Direct Power Down State and assert reset */
+	val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+	val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+	iowrite32(val, reg + RCAR_USBCTR_REG);
+	udelay(4);
+
+	/* De-assert reset and reset PCIAHB window1 size */
+	val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+		 RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+
+	/* Setup PCIAHB window1 size */
+	switch (priv->window_size) {
+	case SZ_2G:
+		val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
+		break;
+	case SZ_1G:
+		val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
+		break;
+	case SZ_512M:
+		val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
+		break;
+	default:
+		pr_warn("unknown window size %ld - defaulting to 256M\n",
+			priv->window_size);
+		priv->window_size = SZ_256M;
+		/* fall-through */
+	case SZ_256M:
+		val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
+		break;
+	}
+	iowrite32(val, reg + RCAR_USBCTR_REG);
+
+	/* Configure AHB master and slave modes */
+	iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+	/* Configure PCI arbiter */
+	val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+	val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+	       RCAR_PCI_ARBITER_PCIBP_MODE;
+	iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+	/* PCI-AHB mapping */
+	iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16,
+		  reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+	/* AHB-PCI mapping: OHCI/EHCI registers */
+	val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+	iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+	/* Enable AHB-PCI bridge PCI configuration access */
+	iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+		  reg + RCAR_AHBPCI_WIN1_CTR_REG);
+	/* Set PCI-AHB Window1 address */
+	iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH,
+		  reg + PCI_BASE_ADDRESS_1);
+	/* Set AHB-PCI bridge PCI communication area address */
+	val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+	iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+	val = ioread32(reg + PCI_COMMAND);
+	val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+	       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+	iowrite32(val, reg + PCI_COMMAND);
+
+	/* Enable PCI interrupts */
+	iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+		  reg + RCAR_PCI_INT_ENABLE_REG);
+
+	if (priv->irq > 0)
+		rcar_pci_setup_errirq(priv);
+
+	/* Add PCI resources */
+	pci_add_resource(&sys->resources, &priv->mem_res);
+	ret = devm_request_pci_bus_resources(dev, &sys->resources);
+	if (ret < 0)
+		return ret;
+
+	/* Set up the bus number based on the platform device id / OF bus-range */
+	sys->busnr = priv->busnr;
+	return 1;
+}
+
+static struct pci_ops rcar_pci_ops = {
+	.map_bus = rcar_pci_cfg_base,
+	.read	= pci_generic_config_read,
+	.write	= pci_generic_config_write,
+};
+
+static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
+					 struct device_node *np)
+{
+	struct device *dev = pci->dev;
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	int index = 0;
+
+	/* Failure to parse is ok as we fall back to defaults */
+	if (of_pci_dma_range_parser_init(&parser, np))
+		return 0;
+
+	/* Get the dma-ranges from DT */
+	for_each_of_pci_range(&parser, &range) {
+		/* Hardware only allows one inbound 32-bit range */
+		if (index)
+			return -EINVAL;
+
+		pci->window_addr = (unsigned long)range.cpu_addr;
+		pci->window_pci = (unsigned long)range.pci_addr;
+		pci->window_size = (unsigned long)range.size;
+
+		/* Catch HW limitations */
+		if (!(range.flags & IORESOURCE_PREFETCH)) {
+			dev_err(dev, "window must be prefetchable\n");
+			return -EINVAL;
+		}
+		if (pci->window_addr) {
+			u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
+
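+			/*
+			 * The window base must be naturally aligned:
+			 * its lowest set bit bounds the largest window
+			 * size it can support.
+			 */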
+			if (lowaddr < pci->window_size) {
+				dev_err(dev, "invalid window size/addr\n");
+				return -EINVAL;
+			}
+		}
+		index++;
+	}
+
+	return 0;
+}
+
+static int rcar_pci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *cfg_res, *mem_res;
+	struct rcar_pci_priv *priv;
+	void __iomem *reg;
+	struct hw_pci hw;
+	void *hw_private[1];
+
+	cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	reg = devm_ioremap_resource(dev, cfg_res);
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem_res || !mem_res->start)
+		return -ENODEV;
+
+	if (mem_res->start & 0xFFFF)
+		return -EINVAL;
+
+	priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->mem_res = *mem_res;
+	priv->cfg_res = cfg_res;
+
+	priv->irq = platform_get_irq(pdev, 0);
+	priv->reg = reg;
+	priv->dev = dev;
+
+	if (priv->irq < 0) {
+		dev_err(dev, "no valid irq found\n");
+		return priv->irq;
+	}
+
+	/* default window addr and size if not specified in DT */
+	priv->window_addr = 0x40000000;
+	priv->window_pci = 0x40000000;
+	priv->window_size = SZ_1G;
+
+	if (dev->of_node) {
+		struct resource busnr;
+		int ret;
+
+		ret = of_pci_parse_bus_range(dev->of_node, &busnr);
+		if (ret < 0) {
+			dev_err(dev, "failed to parse bus-range\n");
+			return ret;
+		}
+
+		priv->busnr = busnr.start;
+		if (busnr.end != busnr.start)
+			dev_warn(dev, "only one bus number supported\n");
+
+		ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
+		if (ret < 0) {
+			dev_err(dev, "failed to parse dma-range\n");
+			return ret;
+		}
+	} else {
+		priv->busnr = pdev->id;
+	}
+
+	hw_private[0] = priv;
+	memset(&hw, 0, sizeof(hw));
+	hw.nr_controllers = ARRAY_SIZE(hw_private);
+	hw.io_optional = 1;
+	hw.private_data = hw_private;
+	hw.map_irq = rcar_pci_map_irq;
+	hw.ops = &rcar_pci_ops;
+	hw.setup = rcar_pci_setup;
+	pci_common_init_dev(dev, &hw);
+	return 0;
+}
+
+static const struct of_device_id rcar_pci_of_match[] = {
+	{ .compatible = "renesas,pci-r8a7790", },
+	{ .compatible = "renesas,pci-r8a7791", },
+	{ .compatible = "renesas,pci-r8a7794", },
+	{ .compatible = "renesas,pci-rcar-gen2", },
+	{ },
+};
+
+static struct platform_driver rcar_pci_driver = {
+	.driver = {
+		.name = "pci-rcar-gen2",
+		.suppress_bind_attrs = true,
+		.of_match_table = rcar_pci_of_match,
+	},
+	.probe = rcar_pci_probe,
+};
+builtin_platform_driver(rcar_pci_driver);
diff --git a/marvell/linux/drivers/pci/controller/pci-tegra.c b/marvell/linux/drivers/pci/controller/pci-tegra.c
new file mode 100644
index 0000000..74c0ddd
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-tegra.c
@@ -0,0 +1,2954 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Tegra SoCs
+ *
+ * Copyright (c) 2010, CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on NVIDIA PCIe driver
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * Bits taken from arch/arm/mach-dove/pcie.c
+ *
+ * Author: Thierry Reding <treding@nvidia.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <soc/tegra/cpuidle.h>
+#include <soc/tegra/pmc.h>
+
+#include "../pci.h"
+
+#define INT_PCI_MSI_NR (8 * 32)
+
+/* register definitions */
+
+#define AFI_AXI_BAR0_SZ	0x00
+#define AFI_AXI_BAR1_SZ	0x04
+#define AFI_AXI_BAR2_SZ	0x08
+#define AFI_AXI_BAR3_SZ	0x0c
+#define AFI_AXI_BAR4_SZ	0x10
+#define AFI_AXI_BAR5_SZ	0x14
+
+#define AFI_AXI_BAR0_START	0x18
+#define AFI_AXI_BAR1_START	0x1c
+#define AFI_AXI_BAR2_START	0x20
+#define AFI_AXI_BAR3_START	0x24
+#define AFI_AXI_BAR4_START	0x28
+#define AFI_AXI_BAR5_START	0x2c
+
+#define AFI_FPCI_BAR0	0x30
+#define AFI_FPCI_BAR1	0x34
+#define AFI_FPCI_BAR2	0x38
+#define AFI_FPCI_BAR3	0x3c
+#define AFI_FPCI_BAR4	0x40
+#define AFI_FPCI_BAR5	0x44
+
+#define AFI_CACHE_BAR0_SZ	0x48
+#define AFI_CACHE_BAR0_ST	0x4c
+#define AFI_CACHE_BAR1_SZ	0x50
+#define AFI_CACHE_BAR1_ST	0x54
+
+#define AFI_MSI_BAR_SZ		0x60
+#define AFI_MSI_FPCI_BAR_ST	0x64
+#define AFI_MSI_AXI_BAR_ST	0x68
+
+#define AFI_MSI_VEC0		0x6c
+#define AFI_MSI_VEC1		0x70
+#define AFI_MSI_VEC2		0x74
+#define AFI_MSI_VEC3		0x78
+#define AFI_MSI_VEC4		0x7c
+#define AFI_MSI_VEC5		0x80
+#define AFI_MSI_VEC6		0x84
+#define AFI_MSI_VEC7		0x88
+
+#define AFI_MSI_EN_VEC0		0x8c
+#define AFI_MSI_EN_VEC1		0x90
+#define AFI_MSI_EN_VEC2		0x94
+#define AFI_MSI_EN_VEC3		0x98
+#define AFI_MSI_EN_VEC4		0x9c
+#define AFI_MSI_EN_VEC5		0xa0
+#define AFI_MSI_EN_VEC6		0xa4
+#define AFI_MSI_EN_VEC7		0xa8
+
+#define AFI_CONFIGURATION		0xac
+#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
+#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)
+
+#define AFI_FPCI_ERROR_MASKS	0xb0
+
+#define AFI_INTR_MASK		0xb4
+#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
+#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
+
+#define AFI_INTR_CODE			0xb8
+#define  AFI_INTR_CODE_MASK		0xf
+#define  AFI_INTR_INI_SLAVE_ERROR	1
+#define  AFI_INTR_INI_DECODE_ERROR	2
+#define  AFI_INTR_TARGET_ABORT		3
+#define  AFI_INTR_MASTER_ABORT		4
+#define  AFI_INTR_INVALID_WRITE		5
+#define  AFI_INTR_LEGACY		6
+#define  AFI_INTR_FPCI_DECODE_ERROR	7
+#define  AFI_INTR_AXI_DECODE_ERROR	8
+#define  AFI_INTR_FPCI_TIMEOUT		9
+#define  AFI_INTR_PE_PRSNT_SENSE	10
+#define  AFI_INTR_PE_CLKREQ_SENSE	11
+#define  AFI_INTR_CLKCLAMP_SENSE	12
+#define  AFI_INTR_RDY4PD_SENSE		13
+#define  AFI_INTR_P2P_ERROR		14
+
+#define AFI_INTR_SIGNATURE	0xbc
+#define AFI_UPPER_FPCI_ADDRESS	0xc0
+#define AFI_SM_INTR_ENABLE	0xc4
+#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
+#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
+#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
+#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
+#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
+#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
+#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
+#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
+
+#define AFI_AFI_INTR_ENABLE		0xc8
+#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
+#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
+#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
+#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
+#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
+#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
+#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
+#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
+#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
+
+#define AFI_PCIE_PME		0xf0
+
+#define AFI_PCIE_CONFIG					0x0f8
+#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
+#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
+#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
+#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)
+
+#define AFI_FUSE			0x104
+#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
+
+#define AFI_PEX0_CTRL			0x110
+#define AFI_PEX1_CTRL			0x118
+#define  AFI_PEX_CTRL_RST		(1 << 0)
+#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
+#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
+#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
+
+#define AFI_PLLE_CONTROL		0x160
+#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
+
+#define AFI_PEXBIAS_CTRL_0		0x168
+
+#define RP_ECTL_2_R1	0x00000e84
+#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff
+
+#define RP_ECTL_4_R1	0x00000e8c
+#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
+#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16
+
+#define RP_ECTL_5_R1	0x00000e90
+#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff
+
+#define RP_ECTL_6_R1	0x00000e94
+#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff
+
+#define RP_ECTL_2_R2	0x00000ea4
+#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff
+
+#define RP_ECTL_4_R2	0x00000eac
+#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
+#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16
+
+#define RP_ECTL_5_R2	0x00000eb0
+#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff
+
+#define RP_ECTL_6_R2	0x00000eb4
+#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff
+
+#define RP_VEND_XP	0x00000f00
+#define  RP_VEND_XP_DL_UP			(1 << 30)
+#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
+#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
+#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)
+
+#define RP_VEND_CTL0	0x00000f44
+#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
+#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)
+
+#define RP_VEND_CTL1	0x00000f48
+#define  RP_VEND_CTL1_ERPT	(1 << 13)
+
+#define RP_VEND_XP_BIST	0x00000f4c
+#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)
+
+#define RP_VEND_CTL2 0x00000fa8
+#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
+
+#define RP_PRIV_MISC	0x00000fe0
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
+#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
+#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
+#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
+#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
+#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
+#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)
+
+#define RP_LINK_CONTROL_STATUS			0x00000090
+#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
+#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
+
+#define RP_LINK_CONTROL_STATUS_2		0x000000b0
+
+#define PADS_CTL_SEL		0x0000009c
+
+#define PADS_CTL		0x000000a0
+#define  PADS_CTL_IDDQ_1L	(1 << 0)
+#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
+#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
+
+#define PADS_PLL_CTL_TEGRA20			0x000000b8
+#define PADS_PLL_CTL_TEGRA30			0x000000b4
+#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
+#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
+#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
+#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
+#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
+#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
+#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
+#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
+#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
+#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
+
+#define PADS_REFCLK_CFG0			0x000000c8
+#define PADS_REFCLK_CFG1			0x000000cc
+#define PADS_REFCLK_BIAS			0x000000d0
+
+/*
+ * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
+ * entries, one entry per PCIe port. These field definitions and desired
+ * values aren't in the TRM, but do come from NVIDIA.
+ */
+#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
+#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
+#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
+#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
+
+#define PME_ACK_TIMEOUT 10000
+#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
+
+struct tegra_msi {
+	struct msi_controller chip;
+	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+	struct irq_domain *domain;
+	struct mutex lock;
+	void *virt;
+	dma_addr_t phys;
+	int irq;
+};
+
+/* used to differentiate between Tegra SoC generations */
+struct tegra_pcie_port_soc {
+	struct {
+		u8 turnoff_bit;
+		u8 ack_bit;
+	} pme;
+};
+
+struct tegra_pcie_soc {
+	unsigned int num_ports;
+	const struct tegra_pcie_port_soc *ports;
+	unsigned int msi_base_shift;
+	unsigned long afi_pex2_ctrl;
+	u32 pads_pll_ctl;
+	u32 tx_ref_sel;
+	u32 pads_refclk_cfg0;
+	u32 pads_refclk_cfg1;
+	u32 update_fc_threshold;
+	bool has_pex_clkreq_en;
+	bool has_pex_bias_ctrl;
+	bool has_intr_prsnt_sense;
+	bool has_cml_clk;
+	bool has_gen2;
+	bool force_pca_enable;
+	bool program_uphy;
+	bool update_clamp_threshold;
+	bool program_deskew_time;
+	bool update_fc_timer;
+	bool has_cache_bars;
+	struct {
+		struct {
+			u32 rp_ectl_2_r1;
+			u32 rp_ectl_4_r1;
+			u32 rp_ectl_5_r1;
+			u32 rp_ectl_6_r1;
+			u32 rp_ectl_2_r2;
+			u32 rp_ectl_4_r2;
+			u32 rp_ectl_5_r2;
+			u32 rp_ectl_6_r2;
+		} regs;
+		bool enable;
+	} ectl;
+};
+
+static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
+{
+	return container_of(chip, struct tegra_msi, chip);
+}
+
+struct tegra_pcie {
+	struct device *dev;
+
+	void __iomem *pads;
+	void __iomem *afi;
+	void __iomem *cfg;
+	int irq;
+
+	struct resource cs;
+	struct resource io;
+	struct resource pio;
+	struct resource mem;
+	struct resource prefetch;
+	struct resource busn;
+
+	struct {
+		resource_size_t mem;
+		resource_size_t io;
+	} offset;
+
+	struct clk *pex_clk;
+	struct clk *afi_clk;
+	struct clk *pll_e;
+	struct clk *cml_clk;
+
+	struct reset_control *pex_rst;
+	struct reset_control *afi_rst;
+	struct reset_control *pcie_xrst;
+
+	bool legacy_phy;
+	struct phy *phy;
+
+	struct tegra_msi msi;
+
+	struct list_head ports;
+	u32 xbar_config;
+
+	struct regulator_bulk_data *supplies;
+	unsigned int num_supplies;
+
+	const struct tegra_pcie_soc *soc;
+	struct dentry *debugfs;
+};
+
+struct tegra_pcie_port {
+	struct tegra_pcie *pcie;
+	struct device_node *np;
+	struct list_head list;
+	struct resource regs;
+	void __iomem *base;
+	unsigned int index;
+	unsigned int lanes;
+
+	struct phy **phys;
+
+	struct gpio_desc *reset_gpio;
+};
+
+struct tegra_pcie_bus {
+	struct list_head list;
+	unsigned int nr;
+};
+
+static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
+			      unsigned long offset)
+{
+	writel(value, pcie->afi + offset);
+}
+
+static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+	return readl(pcie->afi + offset);
+}
+
+static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
+			       unsigned long offset)
+{
+	writel(value, pcie->pads + offset);
+}
+
+static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+	return readl(pcie->pads + offset);
+}
+
+/*
+ * The configuration space mapping on Tegra is somewhat similar to the ECAM
+ * defined by PCIe. However it deviates a bit in how the 4 bits for extended
+ * register accesses are mapped:
+ *
+ *    [27:24] extended register number
+ *    [23:16] bus number
+ *    [15:11] device number
+ *    [10: 8] function number
+ *    [ 7: 0] register number
+ *
+ * Mapping the whole extended configuration space would require 256 MiB of
+ * virtual address space, only a small part of which will actually be used.
+ *
+ * To work around this, a 4 KiB region is used to generate the required
+ * configuration transaction with relevant B:D:F and register offset values.
+ * This is achieved by dynamically programming base address and size of
+ * AFI_AXI_BAR used for end point config space mapping to make sure that the
+ * address (access to which generates correct config transaction) falls in
+ * this 4 KiB region.
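+ *
+ * For example (illustrative): bus 1, devfn 0, register 0x104 yields
+ * offset 0x01010004 - extended register nibble 1, bus 1, register 0x04.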
+ */
+static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
+					   unsigned int where)
+{
+	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
+	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
+}
+
+static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
+					unsigned int devfn,
+					int where)
+{
+	struct tegra_pcie *pcie = bus->sysdata;
+	void __iomem *addr = NULL;
+
+	if (bus->number == 0) {
+		unsigned int slot = PCI_SLOT(devfn);
+		struct tegra_pcie_port *port;
+
+		list_for_each_entry(port, &pcie->ports, list) {
+			if (port->index + 1 == slot) {
+				addr = port->base + (where & ~3);
+				break;
+			}
+		}
+	} else {
+		unsigned int offset;
+		u32 base;
+
+		offset = tegra_pcie_conf_offset(bus->number, devfn, where);
+
+		/* move 4 KiB window to offset within the FPCI region */
+		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
+		afi_writel(pcie, base, AFI_FPCI_BAR0);
+
+		/* move to correct offset within the 4 KiB page */
+		addr = pcie->cfg + (offset & (SZ_4K - 1));
+	}
+
+	return addr;
+}
+
+static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+				  int where, int size, u32 *value)
+{
+	if (bus->number == 0)
+		return pci_generic_config_read32(bus, devfn, where, size,
+						 value);
+
+	return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 value)
+{
+	if (bus->number == 0)
+		return pci_generic_config_write32(bus, devfn, where, size,
+						  value);
+
+	return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static struct pci_ops tegra_pcie_ops = {
+	.map_bus = tegra_pcie_map_bus,
+	.read = tegra_pcie_config_read,
+	.write = tegra_pcie_config_write,
+};
+
+static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	unsigned long ret = 0;
+
+	switch (port->index) {
+	case 0:
+		ret = AFI_PEX0_CTRL;
+		break;
+
+	case 1:
+		ret = AFI_PEX1_CTRL;
+		break;
+
+	case 2:
+		ret = soc->afi_pex2_ctrl;
+		break;
+	}
+
+	return ret;
+}
+
+static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
+{
+	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+	unsigned long value;
+
+	/* pulse reset signal */
+	if (port->reset_gpio) {
+		gpiod_set_value(port->reset_gpio, 1);
+	} else {
+		value = afi_readl(port->pcie, ctrl);
+		value &= ~AFI_PEX_CTRL_RST;
+		afi_writel(port->pcie, value, ctrl);
+	}
+
+	usleep_range(1000, 2000);
+
+	if (port->reset_gpio) {
+		gpiod_set_value(port->reset_gpio, 0);
+	} else {
+		value = afi_readl(port->pcie, ctrl);
+		value |= AFI_PEX_CTRL_RST;
+		afi_writel(port->pcie, value, ctrl);
+	}
+}
+
+static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	u32 value;
+
+	/* Enable AER capability */
+	value = readl(port->base + RP_VEND_CTL1);
+	value |= RP_VEND_CTL1_ERPT;
+	writel(value, port->base + RP_VEND_CTL1);
+
+	/* Optimal settings to enhance bandwidth */
+	value = readl(port->base + RP_VEND_XP);
+	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
+	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
+	writel(value, port->base + RP_VEND_XP);
+
+	/*
+	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
+	 * to avoid truncation of PM messages which results in receiver errors
+	 */
+	value = readl(port->base + RP_VEND_XP_BIST);
+	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
+	writel(value, port->base + RP_VEND_XP_BIST);
+
+	value = readl(port->base + RP_PRIV_MISC);
+	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
+	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
+
+	if (soc->update_clamp_threshold) {
+		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
+				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
+		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
+			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
+	}
+
+	writel(value, port->base + RP_PRIV_MISC);
+}
+
+static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	u32 value;
+
+	value = readl(port->base + RP_ECTL_2_R1);
+	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_2_r1;
+	writel(value, port->base + RP_ECTL_2_R1);
+
+	value = readl(port->base + RP_ECTL_4_R1);
+	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_4_r1 <<
+				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
+	writel(value, port->base + RP_ECTL_4_R1);
+
+	value = readl(port->base + RP_ECTL_5_R1);
+	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_5_r1;
+	writel(value, port->base + RP_ECTL_5_R1);
+
+	value = readl(port->base + RP_ECTL_6_R1);
+	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_6_r1;
+	writel(value, port->base + RP_ECTL_6_R1);
+
+	value = readl(port->base + RP_ECTL_2_R2);
+	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_2_r2;
+	writel(value, port->base + RP_ECTL_2_R2);
+
+	value = readl(port->base + RP_ECTL_4_R2);
+	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_4_r2 <<
+				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
+	writel(value, port->base + RP_ECTL_4_R2);
+
+	value = readl(port->base + RP_ECTL_5_R2);
+	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_5_r2;
+	writel(value, port->base + RP_ECTL_5_R2);
+
+	value = readl(port->base + RP_ECTL_6_R2);
+	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
+	value |= soc->ectl.regs.rp_ectl_6_r2;
+	writel(value, port->base + RP_ECTL_6_R2);
+}
+
+static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
+{
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	u32 value;
+
+	/*
+	 * Sometimes link speed change from Gen2 to Gen1 fails due to
+	 * instability in deskew logic on lane-0. Increase the deskew
+	 * retry time to resolve this issue.
+	 */
+	if (soc->program_deskew_time) {
+		value = readl(port->base + RP_VEND_CTL0);
+		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
+		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
+		writel(value, port->base + RP_VEND_CTL0);
+	}
+
+	if (soc->update_fc_timer) {
+		value = readl(port->base + RP_VEND_XP);
+		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+		value |= soc->update_fc_threshold;
+		writel(value, port->base + RP_VEND_XP);
+	}
+
+	/*
+	 * The PCIe link doesn't come up with a few legacy PCIe endpoints
+	 * if the root port advertises both Gen-1 and Gen-2 speeds on
+	 * Tegra. Hence, the strategy followed here is to initially
+	 * advertise only Gen-1 and, once the link is up, retrain it to
+	 * Gen-2 speed.
+	 */
+	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+	value &= ~PCI_EXP_LNKSTA_CLS;
+	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
+	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+}
+
+static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
+{
+	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	unsigned long value;
+
+	/* enable reference clock */
+	value = afi_readl(port->pcie, ctrl);
+	value |= AFI_PEX_CTRL_REFCLK_EN;
+
+	if (soc->has_pex_clkreq_en)
+		value |= AFI_PEX_CTRL_CLKREQ_EN;
+
+	value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
+	afi_writel(port->pcie, value, ctrl);
+
+	tegra_pcie_port_reset(port);
+
+	if (soc->force_pca_enable) {
+		value = readl(port->base + RP_VEND_CTL2);
+		value |= RP_VEND_CTL2_PCA_ENABLE;
+		writel(value, port->base + RP_VEND_CTL2);
+	}
+
+	tegra_pcie_enable_rp_features(port);
+
+	if (soc->ectl.enable)
+		tegra_pcie_program_ectl_settings(port);
+
+	tegra_pcie_apply_sw_fixup(port);
+}
+
+static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
+{
+	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+	const struct tegra_pcie_soc *soc = port->pcie->soc;
+	unsigned long value;
+
+	/* assert port reset */
+	value = afi_readl(port->pcie, ctrl);
+	value &= ~AFI_PEX_CTRL_RST;
+	afi_writel(port->pcie, value, ctrl);
+
+	/* disable reference clock */
+	value = afi_readl(port->pcie, ctrl);
+
+	if (soc->has_pex_clkreq_en)
+		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
+	value &= ~AFI_PEX_CTRL_REFCLK_EN;
+	afi_writel(port->pcie, value, ctrl);
+
+	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
+	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
+	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
+}
+
+static void tegra_pcie_port_free(struct tegra_pcie_port *port)
+{
+	struct tegra_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
+
+	devm_iounmap(dev, port->base);
+	devm_release_mem_region(dev, port->regs.start,
+				resource_size(&port->regs));
+	list_del(&port->list);
+	devm_kfree(dev, port);
+}
+
+/* The Tegra PCIe root complex wrongly reports its device class */
+static void tegra_pcie_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
+
+/* Tegra20 and Tegra30 PCIe require relaxed ordering */
+static void tegra_pcie_relax_enable(struct pci_dev *dev)
+{
+	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
+
+static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+	struct device *dev = pcie->dev;
+	int err;
+
+	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
+	pci_add_resource(windows, &pcie->busn);
+
+	err = devm_request_pci_bus_resources(dev, windows);
+	if (err < 0) {
+		pci_free_resource_list(windows);
+		return err;
+	}
+
+	pci_remap_iospace(&pcie->pio, pcie->io.start);
+
+	return 0;
+}
+
+static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+
+	pci_unmap_iospace(&pcie->pio);
+	pci_free_resource_list(windows);
+}
+
+static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+	struct tegra_pcie *pcie = pdev->bus->sysdata;
+	int irq;
+
+	tegra_cpuidle_pcie_irqs_in_use();
+
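+	/* prefer the DT interrupt map; fall back to the shared controller IRQ */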
+	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
+	if (!irq)
+		irq = pcie->irq;
+
+	return irq;
+}
+
+static irqreturn_t tegra_pcie_isr(int irq, void *arg)
+{
+	static const char * const err_msg[] = {
+		"Unknown",
+		"AXI slave error",
+		"AXI decode error",
+		"Target abort",
+		"Master abort",
+		"Invalid write",
+		"Legacy interrupt",
+		"Response decoding error",
+		"AXI response decoding error",
+		"Transaction timeout",
+		"Slot present pin change",
+		"Slot clock request change",
+		"TMS clock ramp change",
+		"TMS ready for power down",
+		"Peer2Peer error",
+	};
+	struct tegra_pcie *pcie = arg;
+	struct device *dev = pcie->dev;
+	u32 code, signature;
+
+	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
+	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
+	afi_writel(pcie, 0, AFI_INTR_CODE);
+
+	if (code == AFI_INTR_LEGACY)
+		return IRQ_NONE;
+
+	if (code >= ARRAY_SIZE(err_msg))
+		code = 0;
+
+	/*
+	 * do not pollute kernel log with master abort reports since they
+	 * happen a lot during enumeration
+	 */
+	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
+		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
+	else
+		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
+
+	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
+	    code == AFI_INTR_FPCI_DECODE_ERROR) {
+		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
+		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
+
+		if (code == AFI_INTR_MASTER_ABORT)
+			dev_dbg(dev, "  FPCI address: %10llx\n", address);
+		else
+			dev_err(dev, "  FPCI address: %10llx\n", address);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * FPCI map is as follows:
+ * - 0xfdfc000000: I/O space
+ * - 0xfdfe000000: type 0 configuration space
+ * - 0xfdff000000: type 1 configuration space
+ * - 0xfe00000000: type 0 extended configuration space
+ * - 0xfe10000000: type 1 extended configuration space
+ */
+static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
+{
+	u32 fpci_bar, size, axi_address;
+
+	/* Bar 0: type 1 extended configuration space */
+	size = resource_size(&pcie->cs);
+	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
+	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
+
+	/* Bar 1: downstream IO bar */
+	fpci_bar = 0xfdfc0000;
+	size = resource_size(&pcie->io);
+	axi_address = pcie->io.start;
+	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+
+	/* Bar 2: prefetchable memory BAR */
+	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
+	size = resource_size(&pcie->prefetch);
+	axi_address = pcie->prefetch.start;
+	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+	/* Bar 3: non prefetchable memory BAR */
+	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
+	size = resource_size(&pcie->mem);
+	axi_address = pcie->mem.start;
+	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+
+	/* NULL out the remaining BARs as they are not used */
+	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
+	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
+	afi_writel(pcie, 0, AFI_FPCI_BAR4);
+
+	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
+	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
+	afi_writel(pcie, 0, AFI_FPCI_BAR5);
+
+	if (pcie->soc->has_cache_bars) {
+		/* map all upstream transactions as uncached */
+		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
+		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+	}
+
+	/* MSI translations are setup only when needed */
+	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
+	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
+	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+}
+
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	u32 value;
+
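+	/* convert the millisecond timeout into an absolute jiffies deadline */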
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		value = pads_readl(pcie, soc->pads_pll_ctl);
+		if (value & PADS_PLL_CTL_LOCKDET)
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	u32 value;
+	int err;
+
+	/* initialize internal PHY, enable up to 16 PCIE lanes */
+	pads_writel(pcie, 0x0, PADS_CTL_SEL);
+
+	/* override IDDQ to 1 on all 4 lanes */
+	value = pads_readl(pcie, PADS_CTL);
+	value |= PADS_CTL_IDDQ_1L;
+	pads_writel(pcie, value, PADS_CTL);
+
+	/*
+	 * Set up PHY PLL inputs select PLLE output as refclock,
+	 * set TX ref sel to div10 (not div5).
+	 */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	/* reset PLL */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	usleep_range(20, 100);
+
+	/* take PLL out of reset  */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value |= PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	/* wait for the PLL to lock */
+	err = tegra_pcie_pll_wait(pcie, 500);
+	if (err < 0) {
+		dev_err(dev, "PLL failed to lock: %d\n", err);
+		return err;
+	}
+
+	/* turn off IDDQ override */
+	value = pads_readl(pcie, PADS_CTL);
+	value &= ~PADS_CTL_IDDQ_1L;
+	pads_writel(pcie, value, PADS_CTL);
+
+	/* enable TX/RX data */
+	value = pads_readl(pcie, PADS_CTL);
+	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
+	pads_writel(pcie, value, PADS_CTL);
+
+	return 0;
+}
+
+static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	u32 value;
+
+	/* disable TX/RX data */
+	value = pads_readl(pcie, PADS_CTL);
+	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
+	pads_writel(pcie, value, PADS_CTL);
+
+	/* override IDDQ */
+	value = pads_readl(pcie, PADS_CTL);
+	value |= PADS_CTL_IDDQ_1L;
+	pads_writel(pcie, value, PADS_CTL);
+
+	/* reset PLL */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	usleep_range(20, 100);
+
+	return 0;
+}
+
+static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < port->lanes; i++) {
+		err = phy_power_on(port->phys[i]);
+		if (err < 0) {
+			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < port->lanes; i++) {
+		err = phy_power_off(port->phys[i]);
+		if (err < 0) {
+			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
+				err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct tegra_pcie_port *port;
+	int err;
+
+	if (pcie->legacy_phy) {
+		if (pcie->phy)
+			err = phy_power_on(pcie->phy);
+		else
+			err = tegra_pcie_phy_enable(pcie);
+
+		if (err < 0)
+			dev_err(dev, "failed to power on PHY: %d\n", err);
+
+		return err;
+	}
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		err = tegra_pcie_port_phy_power_on(port);
+		if (err < 0) {
+			dev_err(dev,
+				"failed to power on PCIe port %u PHY: %d\n",
+				port->index, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct tegra_pcie_port *port;
+	int err;
+
+	if (pcie->legacy_phy) {
+		if (pcie->phy)
+			err = phy_power_off(pcie->phy);
+		else
+			err = tegra_pcie_phy_disable(pcie);
+
+		if (err < 0)
+			dev_err(dev, "failed to power off PHY: %d\n", err);
+
+		return err;
+	}
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		err = tegra_pcie_port_phy_power_off(port);
+		if (err < 0) {
+			dev_err(dev,
+				"failed to power off PCIe port %u PHY: %d\n",
+				port->index, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	struct tegra_pcie_port *port;
+	unsigned long value;
+
+	/* enable PLL power down */
+	if (pcie->phy) {
+		value = afi_readl(pcie, AFI_PLLE_CONTROL);
+		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+		afi_writel(pcie, value, AFI_PLLE_CONTROL);
+	}
+
+	/* power down PCIe slot clock bias pad */
+	if (soc->has_pex_bias_ctrl)
+		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+	/* configure mode and disable all ports */
+	value = afi_readl(pcie, AFI_PCIE_CONFIG);
+	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
+	}
+
+	afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+	if (soc->has_gen2) {
+		value = afi_readl(pcie, AFI_FUSE);
+		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+		afi_writel(pcie, value, AFI_FUSE);
+	} else {
+		value = afi_readl(pcie, AFI_FUSE);
+		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+		afi_writel(pcie, value, AFI_FUSE);
+	}
+
+	/* Disable AFI dynamic clock gating and enable PCIe */
+	value = afi_readl(pcie, AFI_CONFIGURATION);
+	value |= AFI_CONFIGURATION_EN_FPCI;
+	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
+	afi_writel(pcie, value, AFI_CONFIGURATION);
+
+	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
+		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
+		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
+
+	if (soc->has_intr_prsnt_sense)
+		value |= AFI_INTR_EN_PRSNT_SENSE;
+
+	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
+	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
+
+	/* don't enable MSI for now, only when needed */
+	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
+
+	/* disable all exceptions */
+	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
+}
+
+static void tegra_pcie_power_off(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	int err;
+
+	reset_control_assert(pcie->afi_rst);
+
+	clk_disable_unprepare(pcie->pll_e);
+	if (soc->has_cml_clk)
+		clk_disable_unprepare(pcie->cml_clk);
+	clk_disable_unprepare(pcie->afi_clk);
+
+	if (!dev->pm_domain)
+		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+	if (err < 0)
+		dev_warn(dev, "failed to disable regulators: %d\n", err);
+}
+
+static int tegra_pcie_power_on(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	int err;
+
+	reset_control_assert(pcie->pcie_xrst);
+	reset_control_assert(pcie->afi_rst);
+	reset_control_assert(pcie->pex_rst);
+
+	if (!dev->pm_domain)
+		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+	/* enable regulators */
+	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
+	if (err < 0)
+		dev_err(dev, "failed to enable regulators: %d\n", err);
+
+	if (!dev->pm_domain) {
+		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
+		if (err) {
+			dev_err(dev, "failed to power ungate: %d\n", err);
+			goto regulator_disable;
+		}
+		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
+		if (err) {
+			dev_err(dev, "failed to remove clamp: %d\n", err);
+			goto powergate;
+		}
+	}
+
+	err = clk_prepare_enable(pcie->afi_clk);
+	if (err < 0) {
+		dev_err(dev, "failed to enable AFI clock: %d\n", err);
+		goto powergate;
+	}
+
+	if (soc->has_cml_clk) {
+		err = clk_prepare_enable(pcie->cml_clk);
+		if (err < 0) {
+			dev_err(dev, "failed to enable CML clock: %d\n", err);
+			goto disable_afi_clk;
+		}
+	}
+
+	err = clk_prepare_enable(pcie->pll_e);
+	if (err < 0) {
+		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
+		goto disable_cml_clk;
+	}
+
+	reset_control_deassert(pcie->afi_rst);
+
+	return 0;
+
+disable_cml_clk:
+	if (soc->has_cml_clk)
+		clk_disable_unprepare(pcie->cml_clk);
+disable_afi_clk:
+	clk_disable_unprepare(pcie->afi_clk);
+powergate:
+	if (!dev->pm_domain)
+		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+regulator_disable:
+	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+
+	return err;
+}
+
+static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+
+	/* Configure the reference clock driver */
+	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
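+	/* only SoCs with more than two ports have a second refclk config register */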
+	if (soc->num_ports > 2)
+		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
+}
+
+static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+
+	pcie->pex_clk = devm_clk_get(dev, "pex");
+	if (IS_ERR(pcie->pex_clk))
+		return PTR_ERR(pcie->pex_clk);
+
+	pcie->afi_clk = devm_clk_get(dev, "afi");
+	if (IS_ERR(pcie->afi_clk))
+		return PTR_ERR(pcie->afi_clk);
+
+	pcie->pll_e = devm_clk_get(dev, "pll_e");
+	if (IS_ERR(pcie->pll_e))
+		return PTR_ERR(pcie->pll_e);
+
+	if (soc->has_cml_clk) {
+		pcie->cml_clk = devm_clk_get(dev, "cml");
+		if (IS_ERR(pcie->cml_clk))
+			return PTR_ERR(pcie->cml_clk);
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+
+	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
+	if (IS_ERR(pcie->pex_rst))
+		return PTR_ERR(pcie->pex_rst);
+
+	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
+	if (IS_ERR(pcie->afi_rst))
+		return PTR_ERR(pcie->afi_rst);
+
+	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
+	if (IS_ERR(pcie->pcie_xrst))
+		return PTR_ERR(pcie->pcie_xrst);
+
+	return 0;
+}
+
+static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	int err;
+
+	pcie->phy = devm_phy_optional_get(dev, "pcie");
+	if (IS_ERR(pcie->phy)) {
+		err = PTR_ERR(pcie->phy);
+		dev_err(dev, "failed to get PHY: %d\n", err);
+		return err;
+	}
+
+	err = phy_init(pcie->phy);
+	if (err < 0) {
+		dev_err(dev, "failed to initialize PHY: %d\n", err);
+		return err;
+	}
+
+	pcie->legacy_phy = true;
+
+	return 0;
+}
+
+static struct phy *devm_of_phy_optional_get_index(struct device *dev,
+						  struct device_node *np,
+						  const char *consumer,
+						  unsigned int index)
+{
+	struct phy *phy;
+	char *name;
+
+	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
+
+	phy = devm_of_phy_get(dev, np, name);
+	kfree(name);
+
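+	/* a PHY that is not described (-ENODEV) is optional, so return NULL */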
+	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+		phy = NULL;
+
+	return phy;
+}
+
+static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	struct phy *phy;
+	unsigned int i;
+	int err;
+
+	port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
+	if (!port->phys)
+		return -ENOMEM;
+
+	for (i = 0; i < port->lanes; i++) {
+		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
+		if (IS_ERR(phy)) {
+			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
+				PTR_ERR(phy));
+			return PTR_ERR(phy);
+		}
+
+		err = phy_init(phy);
+		if (err < 0) {
+			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
+				err);
+			return err;
+		}
+
+		port->phys[i] = phy;
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	struct device_node *np = pcie->dev->of_node;
+	struct tegra_pcie_port *port;
+	int err;
+
+	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
+		return tegra_pcie_phys_get_legacy(pcie);
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		err = tegra_pcie_port_get_phys(port);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
+{
+	struct tegra_pcie_port *port;
+	struct device *dev = pcie->dev;
+	int err, i;
+
+	if (pcie->legacy_phy) {
+		err = phy_exit(pcie->phy);
+		if (err < 0)
+			dev_err(dev, "failed to teardown PHY: %d\n", err);
+		return;
+	}
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		for (i = 0; i < port->lanes; i++) {
+			err = phy_exit(port->phys[i]);
+			if (err < 0)
+				dev_err(dev, "failed to teardown PHY#%u: %d\n",
+					i, err);
+		}
+	}
+}
+
+static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *pads, *afi, *res;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	int err;
+
+	err = tegra_pcie_clocks_get(pcie);
+	if (err) {
+		dev_err(dev, "failed to get clocks: %d\n", err);
+		return err;
+	}
+
+	err = tegra_pcie_resets_get(pcie);
+	if (err) {
+		dev_err(dev, "failed to get resets: %d\n", err);
+		return err;
+	}
+
+	if (soc->program_uphy) {
+		err = tegra_pcie_phys_get(pcie);
+		if (err < 0) {
+			dev_err(dev, "failed to get PHYs: %d\n", err);
+			return err;
+		}
+	}
+
+	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
+	pcie->pads = devm_ioremap_resource(dev, pads);
+	if (IS_ERR(pcie->pads)) {
+		err = PTR_ERR(pcie->pads);
+		goto phys_put;
+	}
+
+	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
+	pcie->afi = devm_ioremap_resource(dev, afi);
+	if (IS_ERR(pcie->afi)) {
+		err = PTR_ERR(pcie->afi);
+		goto phys_put;
+	}
+
+	/* request configuration space; a 4 KiB window is remapped below */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
+	if (!res) {
+		err = -EADDRNOTAVAIL;
+		goto phys_put;
+	}
+
+	pcie->cs = *res;
+
+	/* constrain configuration space to 4 KiB */
+	pcie->cs.end = pcie->cs.start + SZ_4K - 1;
+
+	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
+	if (IS_ERR(pcie->cfg)) {
+		err = PTR_ERR(pcie->cfg);
+		goto phys_put;
+	}
+
+	/* request interrupt */
+	err = platform_get_irq_byname(pdev, "intr");
+	if (err < 0) {
+		dev_err(dev, "failed to get IRQ: %d\n", err);
+		goto phys_put;
+	}
+
+	pcie->irq = err;
+
+	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
+	if (err) {
+		dev_err(dev, "failed to register IRQ: %d\n", err);
+		goto phys_put;
+	}
+
+	return 0;
+
+phys_put:
+	if (soc->program_uphy)
+		tegra_pcie_phys_put(pcie);
+	return err;
+}
+
+static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+
+	if (pcie->irq > 0)
+		free_irq(pcie->irq, pcie);
+
+	if (soc->program_uphy)
+		tegra_pcie_phys_put(pcie);
+
+	return 0;
+}
+
+static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
+{
+	struct tegra_pcie *pcie = port->pcie;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	int err;
+	u32 val;
+	u8 ack_bit;
+
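+	/* send the PME_Turn_Off message and poll for the PME_TO_Ack */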
+	val = afi_readl(pcie, AFI_PCIE_PME);
+	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
+	afi_writel(pcie, val, AFI_PCIE_PME);
+
+	ack_bit = soc->ports[port->index].pme.ack_bit;
+	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
+				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
+	if (err)
+		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
+			port->index);
+
+	usleep_range(10000, 11000);
+
+	val = afi_readl(pcie, AFI_PCIE_PME);
+	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
+	afi_writel(pcie, val, AFI_PCIE_PME);
+}
+
+static int tegra_msi_alloc(struct tegra_msi *chip)
+{
+	int msi;
+
+	mutex_lock(&chip->lock);
+
+	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+	if (msi < INT_PCI_MSI_NR)
+		set_bit(msi, chip->used);
+	else
+		msi = -ENOSPC;
+
+	mutex_unlock(&chip->lock);
+
+	return msi;
+}
+
+static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
+{
+	struct device *dev = chip->chip.dev;
+
+	mutex_lock(&chip->lock);
+
+	if (!test_bit(irq, chip->used))
+		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
+	else
+		clear_bit(irq, chip->used);
+
+	mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
+{
+	struct tegra_pcie *pcie = data;
+	struct device *dev = pcie->dev;
+	struct tegra_msi *msi = &pcie->msi;
+	unsigned int i, processed = 0;
+
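+	/* scan the eight 32-bit MSI vector registers, 32 vectors each */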
+	for (i = 0; i < 8; i++) {
+		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+
+		while (reg) {
+			unsigned int offset = find_first_bit(&reg, 32);
+			unsigned int index = i * 32 + offset;
+			unsigned int irq;
+
+			/* clear the interrupt */
+			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
+
+			irq = irq_find_mapping(msi->domain, index);
+			if (irq) {
+				if (test_bit(index, msi->used))
+					generic_handle_irq(irq);
+				else
+					dev_info(dev, "unhandled MSI\n");
+			} else {
+				/*
+				 * That's weird, who triggered this?
+				 * Just clear it.
+				 */
+				dev_info(dev, "unexpected MSI\n");
+			}
+
+			/* see if there's any more pending in this vector */
+			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+
+			processed++;
+		}
+	}
+
+	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int tegra_msi_setup_irq(struct msi_controller *chip,
+			       struct pci_dev *pdev, struct msi_desc *desc)
+{
+	struct tegra_msi *msi = to_tegra_msi(chip);
+	struct msi_msg msg;
+	unsigned int irq;
+	int hwirq;
+
+	hwirq = tegra_msi_alloc(msi);
+	if (hwirq < 0)
+		return hwirq;
+
+	irq = irq_create_mapping(msi->domain, hwirq);
+	if (!irq) {
+		tegra_msi_free(msi, hwirq);
+		return -EINVAL;
+	}
+
+	irq_set_msi_desc(irq, desc);
+
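+	/* endpoints write the hwirq number to the MSI target page at msi->phys */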
+	msg.address_lo = lower_32_bits(msi->phys);
+	msg.address_hi = upper_32_bits(msi->phys);
+	msg.data = hwirq;
+
+	pci_write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static void tegra_msi_teardown_irq(struct msi_controller *chip,
+				   unsigned int irq)
+{
+	struct tegra_msi *msi = to_tegra_msi(chip);
+	struct irq_data *d = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+	irq_dispose_mapping(irq);
+	tegra_msi_free(msi, hwirq);
+}
+
+static struct irq_chip tegra_msi_irq_chip = {
+	.name = "Tegra PCIe MSI",
+	.irq_enable = pci_msi_unmask_irq,
+	.irq_disable = pci_msi_mask_irq,
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
+			 irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	tegra_cpuidle_pcie_irqs_in_use();
+
+	return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = tegra_msi_map,
+};
+
+static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct platform_device *pdev = to_platform_device(pcie->dev);
+	struct tegra_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
+	int err;
+
+	mutex_init(&msi->lock);
+
+	msi->chip.dev = dev;
+	msi->chip.setup_irq = tegra_msi_setup_irq;
+	msi->chip.teardown_irq = tegra_msi_teardown_irq;
+
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
+					    &msi_domain_ops, &msi->chip);
+	if (!msi->domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	err = platform_get_irq_byname(pdev, "msi");
+	if (err < 0) {
+		dev_err(dev, "failed to get IRQ: %d\n", err);
+		goto free_irq_domain;
+	}
+
+	msi->irq = err;
+
+	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
+			  tegra_msi_irq_chip.name, pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to request IRQ: %d\n", err);
+		goto free_irq_domain;
+	}
+
+	/*
+	 * Though the PCIe controller can address a >32-bit address space, to
+	 * facilitate endpoints that support only a 32-bit MSI target address,
+	 * the mask is set to 32-bit to make sure that the MSI target address
+	 * is always a 32-bit address.
+	 */
+	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	if (err < 0) {
+		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
+		goto free_irq;
+	}
+
+	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
+				    DMA_ATTR_NO_KERNEL_MAPPING);
+	if (!msi->virt) {
+		dev_err(dev, "failed to allocate DMA memory for MSI\n");
+		err = -ENOMEM;
+		goto free_irq;
+	}
+
+	host->msi = &msi->chip;
+
+	return 0;
+
+free_irq:
+	free_irq(msi->irq, pcie);
+free_irq_domain:
+	irq_domain_remove(msi->domain);
+	return err;
+}
+
+static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	struct tegra_msi *msi = &pcie->msi;
+	u32 reg;
+
+	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
+	/* this register is in 4K increments */
+	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
+
+	/* enable all MSI vectors */
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
+	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
+
+	/* and unmask the MSI interrupt */
+	reg = afi_readl(pcie, AFI_INTR_MASK);
+	reg |= AFI_INTR_MASK_MSI_MASK;
+	afi_writel(pcie, reg, AFI_INTR_MASK);
+}
+
+static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
+{
+	struct tegra_msi *msi = &pcie->msi;
+	unsigned int i, irq;
+
+	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
+		       DMA_ATTR_NO_KERNEL_MAPPING);
+
+	if (msi->irq > 0)
+		free_irq(msi->irq, pcie);
+
+	for (i = 0; i < INT_PCI_MSI_NR; i++) {
+		irq = irq_find_mapping(msi->domain, i);
+		if (irq > 0)
+			irq_dispose_mapping(irq);
+	}
+
+	irq_domain_remove(msi->domain);
+}
+
+static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
+{
+	u32 value;
+
+	/* mask the MSI interrupt */
+	value = afi_readl(pcie, AFI_INTR_MASK);
+	value &= ~AFI_INTR_MASK_MSI_MASK;
+	afi_writel(pcie, value, AFI_INTR_MASK);
+
+	/* disable all MSI vectors */
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
+	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+
+	return 0;
+}
+
+static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
+{
+	u32 value;
+
+	value = afi_readl(pcie, AFI_INTR_MASK);
+	value &= ~AFI_INTR_MASK_INT_MASK;
+	afi_writel(pcie, value, AFI_INTR_MASK);
+}
+
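+/*
+ * The "lanes" argument packs one byte per root port (port 0 in the least
+ * significant byte), each holding that port's "nvidia,num-lanes" value as
+ * assembled in tegra_pcie_parse_dt().
+ */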
+static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
+				      u32 *xbar)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
+
+	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+		switch (lanes) {
+		case 0x010004:
+			dev_info(dev, "4x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
+			return 0;
+
+		case 0x010102:
+			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+			return 0;
+
+		case 0x010101:
+			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
+			return 0;
+
+		default:
+			dev_info(dev, "wrong configuration updated in DT, "
+				 "switching to default 2x1, 1x1, 1x1 "
+				 "configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+			return 0;
+		}
+	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+		switch (lanes) {
+		case 0x0000104:
+			dev_info(dev, "4x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+			return 0;
+
+		case 0x0000102:
+			dev_info(dev, "2x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+			return 0;
+		}
+	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+		switch (lanes) {
+		case 0x00000204:
+			dev_info(dev, "4x1, 2x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
+			return 0;
+
+		case 0x00020202:
+			dev_info(dev, "2x3 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
+			return 0;
+
+		case 0x00010104:
+			dev_info(dev, "4x1, 1x2 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
+			return 0;
+		}
+	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+		switch (lanes) {
+		case 0x00000004:
+			dev_info(dev, "single-mode configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
+			return 0;
+
+		case 0x00000202:
+			dev_info(dev, "dual-mode configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Check whether a given set of supplies is available in a device tree node.
+ * This is used to check whether the new or the legacy device tree bindings
+ * should be used.
+ */
+static bool of_regulator_bulk_available(struct device_node *np,
+					struct regulator_bulk_data *supplies,
+					unsigned int num_supplies)
+{
+	char property[32];
+	unsigned int i;
+
+	for (i = 0; i < num_supplies; i++) {
+		snprintf(property, 32, "%s-supply", supplies[i].supply);
+
+		if (of_find_property(np, property, NULL) == NULL)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Old versions of the device tree binding for this device used a set of power
+ * supplies that didn't match the hardware inputs. This happened to work for a
+ * number of cases but is not future-proof. However, to preserve backwards-
+ * compatibility with old device trees, this function will try to use the old
+ * set of supplies.
+ */
+static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
+
+	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
+		pcie->num_supplies = 3;
+	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
+		pcie->num_supplies = 2;
+
+	if (pcie->num_supplies == 0) {
+		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
+		return -ENODEV;
+	}
+
+	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+				      sizeof(*pcie->supplies),
+				      GFP_KERNEL);
+	if (!pcie->supplies)
+		return -ENOMEM;
+
+	pcie->supplies[0].supply = "pex-clk";
+	pcie->supplies[1].supply = "vdd";
+
+	if (pcie->num_supplies > 2)
+		pcie->supplies[2].supply = "avdd";
+
+	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
+}
+
+/*
+ * Obtains the list of regulators required for a particular generation of the
+ * IP block.
+ *
+ * This would've been nice to do simply by providing static tables for use
+ * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
+ * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
+ * and either seems to be optional depending on which ports are being used.
+ */
+static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node;
+	unsigned int i = 0;
+
+	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+		pcie->num_supplies = 4;
+
+		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "dvdd-pex";
+		pcie->supplies[i++].supply = "hvdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex";
+		pcie->supplies[i++].supply = "vddio-pexctl-aud";
+	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+		pcie->num_supplies = 6;
+
+		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "avdd-pll-uerefe";
+		pcie->supplies[i++].supply = "hvddio-pex";
+		pcie->supplies[i++].supply = "dvddio-pex";
+		pcie->supplies[i++].supply = "dvdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+		pcie->supplies[i++].supply = "vddio-pex-ctl";
+	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+		pcie->num_supplies = 7;
+
+		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "avddio-pex";
+		pcie->supplies[i++].supply = "dvddio-pex";
+		pcie->supplies[i++].supply = "avdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex";
+		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+		pcie->supplies[i++].supply = "vddio-pex-ctl";
+		pcie->supplies[i++].supply = "avdd-pll-erefe";
+	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+		bool need_pexa = false, need_pexb = false;
+
+		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
+		if (lane_mask & 0x0f)
+			need_pexa = true;
+
+		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
+		if (lane_mask & 0x30)
+			need_pexb = true;
+
+		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
+					 (need_pexb ? 2 : 0);
+
+		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "avdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex";
+		pcie->supplies[i++].supply = "vddio-pex-ctl";
+		pcie->supplies[i++].supply = "avdd-plle";
+
+		if (need_pexa) {
+			pcie->supplies[i++].supply = "avdd-pexa";
+			pcie->supplies[i++].supply = "vdd-pexa";
+		}
+
+		if (need_pexb) {
+			pcie->supplies[i++].supply = "avdd-pexb";
+			pcie->supplies[i++].supply = "vdd-pexb";
+		}
+	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+		pcie->num_supplies = 5;
+
+		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[0].supply = "avdd-pex";
+		pcie->supplies[1].supply = "vdd-pex";
+		pcie->supplies[2].supply = "avdd-pex-pll";
+		pcie->supplies[3].supply = "avdd-plle";
+		pcie->supplies[4].supply = "vddio-pex-clk";
+	}
+
+	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
+					pcie->num_supplies))
+		return devm_regulator_bulk_get(dev, pcie->num_supplies,
+					       pcie->supplies);
+
+	/*
+	 * If not all regulators are available for this new scheme, assume
+	 * that the device tree complies with an older version of the device
+	 * tree binding.
+	 */
+	dev_info(dev, "using legacy DT binding for power supplies\n");
+
+	devm_kfree(dev, pcie->supplies);
+	pcie->num_supplies = 0;
+
+	return tegra_pcie_get_legacy_regulators(pcie);
+}
+
+static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *np = dev->of_node, *port;
+	const struct tegra_pcie_soc *soc = pcie->soc;
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	u32 lanes = 0, mask = 0;
+	unsigned int lane = 0;
+	struct resource res;
+	int err;
+
+	if (of_pci_range_parser_init(&parser, np)) {
+		dev_err(dev, "missing \"ranges\" property\n");
+		return -EINVAL;
+	}
+
+	for_each_of_pci_range(&parser, &range) {
+		err = of_pci_range_to_resource(&range, np, &res);
+		if (err < 0)
+			return err;
+
+		switch (res.flags & IORESOURCE_TYPE_BITS) {
+		case IORESOURCE_IO:
+			/* Track the bus -> CPU I/O mapping offset. */
+			pcie->offset.io = res.start - range.pci_addr;
+
+			memcpy(&pcie->pio, &res, sizeof(res));
+			pcie->pio.name = np->full_name;
+
+			/*
+			 * The Tegra PCIe host bridge uses this to program the
+			 * mapping of the I/O space to the physical address,
+			 * so we override the .start and .end fields that
+			 * of_pci_range_to_resource() converted to I/O space.
+			 * We also set the IORESOURCE_MEM type to clarify that
+			 * the resource is in the physical memory space.
+			 */
+			pcie->io.start = range.cpu_addr;
+			pcie->io.end = range.cpu_addr + range.size - 1;
+			pcie->io.flags = IORESOURCE_MEM;
+			pcie->io.name = "I/O";
+
+			memcpy(&res, &pcie->io, sizeof(res));
+			break;
+
+		case IORESOURCE_MEM:
+			/*
+			 * Track the bus -> CPU memory mapping offset. This
+			 * assumes that the prefetchable and non-prefetchable
+			 * regions will be the last of type IORESOURCE_MEM in
+			 * the ranges property.
+			 */
+			pcie->offset.mem = res.start - range.pci_addr;
+
+			if (res.flags & IORESOURCE_PREFETCH) {
+				memcpy(&pcie->prefetch, &res, sizeof(res));
+				pcie->prefetch.name = "prefetchable";
+			} else {
+				memcpy(&pcie->mem, &res, sizeof(res));
+				pcie->mem.name = "non-prefetchable";
+			}
+			break;
+		}
+	}
+
+	err = of_pci_parse_bus_range(np, &pcie->busn);
+	if (err < 0) {
+		dev_err(dev, "failed to parse ranges property: %d\n", err);
+		pcie->busn.name = np->name;
+		pcie->busn.start = 0;
+		pcie->busn.end = 0xff;
+		pcie->busn.flags = IORESOURCE_BUS;
+	}
+
+	/* parse root ports */
+	for_each_child_of_node(np, port) {
+		struct tegra_pcie_port *rp;
+		unsigned int index;
+		u32 value;
+		char *label;
+
+		err = of_pci_get_devfn(port);
+		if (err < 0) {
+			dev_err(dev, "failed to parse address: %d\n", err);
+			goto err_node_put;
+		}
+
+		index = PCI_SLOT(err);
+
+		if (index < 1 || index > soc->num_ports) {
+			dev_err(dev, "invalid port number: %d\n", index);
+			err = -EINVAL;
+			goto err_node_put;
+		}
+
+		index--;
+
+		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
+		if (err < 0) {
+			dev_err(dev, "failed to parse # of lanes: %d\n",
+				err);
+			goto err_node_put;
+		}
+
+		if (value > 16) {
+			dev_err(dev, "invalid # of lanes: %u\n", value);
+			err = -EINVAL;
+			goto err_node_put;
+		}
+
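+		/* pack this port's lane count into its byte of the lane descriptor */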
+		lanes |= value << (index << 3);
+
+		if (!of_device_is_available(port)) {
+			lane += value;
+			continue;
+		}
+
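+		/* record which physical lanes this enabled port consumes */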
+		mask |= ((1 << value) - 1) << lane;
+		lane += value;
+
+		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
+		if (!rp) {
+			err = -ENOMEM;
+			goto err_node_put;
+		}
+
+		err = of_address_to_resource(port, 0, &rp->regs);
+		if (err < 0) {
+			dev_err(dev, "failed to parse address: %d\n", err);
+			goto err_node_put;
+		}
+
+		INIT_LIST_HEAD(&rp->list);
+		rp->index = index;
+		rp->lanes = value;
+		rp->pcie = pcie;
+		rp->np = port;
+
+		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
+		if (IS_ERR(rp->base)) {
+			err = PTR_ERR(rp->base);
+			goto err_node_put;
+		}
+
+		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
+		if (!label) {
+			err = -ENOMEM;
+			goto err_node_put;
+		}
+
+		/*
+		 * This returns -ENOENT if the reset-gpios property is not
+		 * populated; in that case, fall back to using the per-port
+		 * AFI register to toggle the PERST# SFIO line.
+		 */
+		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
+							     "reset-gpios", 0,
+							     GPIOD_OUT_LOW,
+							     label);
+		if (IS_ERR(rp->reset_gpio)) {
+			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+				rp->reset_gpio = NULL;
+			} else {
+				dev_err(dev, "failed to get reset GPIO: %ld\n",
+					PTR_ERR(rp->reset_gpio));
+				err = PTR_ERR(rp->reset_gpio);
+				goto err_node_put;
+			}
+		}
+
+		list_add_tail(&rp->list, &pcie->ports);
+	}
+
+	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
+	if (err < 0) {
+		dev_err(dev, "invalid lane configuration\n");
+		return err;
+	}
+
+	err = tegra_pcie_get_regulators(pcie, mask);
+	if (err < 0)
+		return err;
+
+	return 0;
+
+err_node_put:
+	of_node_put(port);
+	return err;
+}
+
+/*
+ * FIXME: If there are no PCIe cards attached, then calling this function
+ * can noticeably increase boot time because of the long timeout
+ * loops.
+ */
+#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
+static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	unsigned int retries = 3;
+	unsigned long value;
+
+	/* override presence detection */
+	value = readl(port->base + RP_PRIV_MISC);
+	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+	writel(value, port->base + RP_PRIV_MISC);
+
+	do {
+		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+		do {
+			value = readl(port->base + RP_VEND_XP);
+
+			if (value & RP_VEND_XP_DL_UP)
+				break;
+
+			usleep_range(1000, 2000);
+		} while (--timeout);
+
+		if (!timeout) {
+			dev_dbg(dev, "link %u down, retrying\n", port->index);
+			goto retry;
+		}
+
+		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+		do {
+			value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+				return true;
+
+			usleep_range(1000, 2000);
+		} while (--timeout);
+
+retry:
+		tegra_pcie_port_reset(port);
+	} while (--retries);
+
+	return false;
+}
+
+static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct tegra_pcie_port *port;
+	ktime_t deadline;
+	u32 value;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		/*
+		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
+		 * is not supported by Tegra. tegra_pcie_change_link_speed()
+		 * is called only for Tegra chips which support Gen2.
+		 * So there is no harm in not verifying the supported link speed.
+		 */
+		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
+		value &= ~PCI_EXP_LNKSTA_CLS;
+		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
+		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
+
+		/*
+		 * Poll until link comes back from recovery to avoid race
+		 * condition.
+		 */
+		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+		while (ktime_before(ktime_get(), deadline)) {
+			value = readl(port->base + RP_LINK_CONTROL_STATUS);
+			if ((value & PCI_EXP_LNKSTA_LT) == 0)
+				break;
+
+			usleep_range(2000, 3000);
+		}
+
+		if (value & PCI_EXP_LNKSTA_LT)
+			dev_warn(dev, "PCIe port %u link is in recovery\n",
+				 port->index);
+
+		/* Retrain the link */
+		value = readl(port->base + RP_LINK_CONTROL_STATUS);
+		value |= PCI_EXP_LNKCTL_RL;
+		writel(value, port->base + RP_LINK_CONTROL_STATUS);
+
+		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
+
+		while (ktime_before(ktime_get(), deadline)) {
+			value = readl(port->base + RP_LINK_CONTROL_STATUS);
+			if ((value & PCI_EXP_LNKSTA_LT) == 0)
+				break;
+
+			usleep_range(2000, 3000);
+		}
+
+		if (value & PCI_EXP_LNKSTA_LT)
+			dev_err(dev, "failed to retrain link of port %u\n",
+				port->index);
+	}
+}
+
+static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct tegra_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		dev_info(dev, "probing port %u, using %u lanes\n",
+			 port->index, port->lanes);
+
+		tegra_pcie_port_enable(port);
+	}
+
+	/* Start LTSSM from Tegra side */
+	reset_control_deassert(pcie->pcie_xrst);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		if (tegra_pcie_port_check_link(port))
+			continue;
+
+		dev_info(dev, "link %u down, ignoring\n", port->index);
+
+		tegra_pcie_port_disable(port);
+		tegra_pcie_port_free(port);
+	}
+
+	if (pcie->soc->has_gen2)
+		tegra_pcie_change_link_speed(pcie);
+}
+
+static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
+{
+	struct tegra_pcie_port *port, *tmp;
+
+	reset_control_assert(pcie->pcie_xrst);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		tegra_pcie_port_disable(port);
+}
+
+static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
+	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
+	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
+};
+
+static const struct tegra_pcie_soc tegra20_pcie = {
+	.num_ports = 2,
+	.ports = tegra20_pcie_ports,
+	.msi_base_shift = 0,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+	.pads_refclk_cfg0 = 0xfa5cfa5c,
+	.has_pex_clkreq_en = false,
+	.has_pex_bias_ctrl = false,
+	.has_intr_prsnt_sense = false,
+	.has_cml_clk = false,
+	.has_gen2 = false,
+	.force_pca_enable = false,
+	.program_uphy = true,
+	.update_clamp_threshold = false,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = true,
+	.ectl.enable = false,
+};
+
+static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
+	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
+	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
+	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
+};
+
+static const struct tegra_pcie_soc tegra30_pcie = {
+	.num_ports = 3,
+	.ports = tegra30_pcie_ports,
+	.msi_base_shift = 8,
+	.afi_pex2_ctrl = 0x128,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.pads_refclk_cfg0 = 0xfa5cfa5c,
+	.pads_refclk_cfg1 = 0xfa5cfa5c,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = true,
+	.has_gen2 = false,
+	.force_pca_enable = false,
+	.program_uphy = true,
+	.update_clamp_threshold = false,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = false,
+	.ectl.enable = false,
+};
+
+static const struct tegra_pcie_soc tegra124_pcie = {
+	.num_ports = 2,
+	.ports = tegra20_pcie_ports,
+	.msi_base_shift = 8,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.pads_refclk_cfg0 = 0x44ac44ac,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = true,
+	.has_gen2 = true,
+	.force_pca_enable = false,
+	.program_uphy = true,
+	.update_clamp_threshold = true,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = false,
+	.ectl.enable = false,
+};
+
+static const struct tegra_pcie_soc tegra210_pcie = {
+	.num_ports = 2,
+	.ports = tegra20_pcie_ports,
+	.msi_base_shift = 8,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.pads_refclk_cfg0 = 0x90b890b8,
+	/* FC threshold is bit[25:18] */
+	.update_fc_threshold = 0x01800000,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = true,
+	.has_gen2 = true,
+	.force_pca_enable = true,
+	.program_uphy = true,
+	.update_clamp_threshold = true,
+	.program_deskew_time = true,
+	.update_fc_timer = true,
+	.has_cache_bars = false,
+	.ectl = {
+		.regs = {
+			.rp_ectl_2_r1 = 0x0000000f,
+			.rp_ectl_4_r1 = 0x00000067,
+			.rp_ectl_5_r1 = 0x55010000,
+			.rp_ectl_6_r1 = 0x00000001,
+			.rp_ectl_2_r2 = 0x0000008f,
+			.rp_ectl_4_r2 = 0x000000c7,
+			.rp_ectl_5_r2 = 0x55010000,
+			.rp_ectl_6_r2 = 0x00000001,
+		},
+		.enable = true,
+	},
+};
+
+static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
+	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
+	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
+	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
+};
+
+static const struct tegra_pcie_soc tegra186_pcie = {
+	.num_ports = 3,
+	.ports = tegra186_pcie_ports,
+	.msi_base_shift = 8,
+	.afi_pex2_ctrl = 0x19c,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.pads_refclk_cfg0 = 0x80b880b8,
+	.pads_refclk_cfg1 = 0x000480b8,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = false,
+	.has_gen2 = true,
+	.force_pca_enable = false,
+	.program_uphy = false,
+	.update_clamp_threshold = false,
+	.program_deskew_time = false,
+	.update_fc_timer = false,
+	.has_cache_bars = false,
+	.ectl.enable = false,
+};
+
+static const struct of_device_id tegra_pcie_of_match[] = {
+	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
+	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
+	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
+	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
+	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
+
+static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct tegra_pcie *pcie = s->private;
+
+	if (list_empty(&pcie->ports))
+		return NULL;
+
+	seq_printf(s, "Index  Status\n");
+
+	return seq_list_start(&pcie->ports, *pos);
+}
+
+static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct tegra_pcie *pcie = s->private;
+
+	return seq_list_next(v, &pcie->ports, pos);
+}
+
+static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
+{
+	bool up = false, active = false;
+	struct tegra_pcie_port *port;
+	unsigned int value;
+
+	port = list_entry(v, struct tegra_pcie_port, list);
+
+	value = readl(port->base + RP_VEND_XP);
+
+	if (value & RP_VEND_XP_DL_UP)
+		up = true;
+
+	value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+		active = true;
+
+	seq_printf(s, "%2u     ", port->index);
+
+	if (up)
+		seq_puts(s, "up");
+
+	if (active) {
+		if (up)
+			seq_puts(s, ", ");
+
+		seq_puts(s, "active");
+	}
+
+	seq_puts(s, "\n");
+	return 0;
+}
+
+static const struct seq_operations tegra_pcie_ports_seq_ops = {
+	.start = tegra_pcie_ports_seq_start,
+	.next = tegra_pcie_ports_seq_next,
+	.stop = tegra_pcie_ports_seq_stop,
+	.show = tegra_pcie_ports_seq_show,
+};
+
+static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
+{
+	struct tegra_pcie *pcie = inode->i_private;
+	struct seq_file *s;
+	int err;
+
+	err = seq_open(file, &tegra_pcie_ports_seq_ops);
+	if (err)
+		return err;
+
+	s = file->private_data;
+	s->private = pcie;
+
+	return 0;
+}
+
+static const struct file_operations tegra_pcie_ports_ops = {
+	.owner = THIS_MODULE,
+	.open = tegra_pcie_ports_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
+{
+	debugfs_remove_recursive(pcie->debugfs);
+	pcie->debugfs = NULL;
+}
+
+static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+{
+	struct dentry *file;
+
+	pcie->debugfs = debugfs_create_dir("pcie", NULL);
+	if (!pcie->debugfs)
+		return -ENOMEM;
+
+	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
+				   pcie, &tegra_pcie_ports_ops);
+	if (!file)
+		goto remove;
+
+	return 0;
+
+remove:
+	tegra_pcie_debugfs_exit(pcie);
+	return -ENOMEM;
+}
+
+static int tegra_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pci_host_bridge *host;
+	struct tegra_pcie *pcie;
+	struct pci_bus *child;
+	int err;
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!host)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(host);
+	host->sysdata = pcie;
+	platform_set_drvdata(pdev, pcie);
+
+	pcie->soc = of_device_get_match_data(dev);
+	INIT_LIST_HEAD(&pcie->ports);
+	pcie->dev = dev;
+
+	err = tegra_pcie_parse_dt(pcie);
+	if (err < 0)
+		return err;
+
+	err = tegra_pcie_get_resources(pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to request resources: %d\n", err);
+		return err;
+	}
+
+	err = tegra_pcie_msi_setup(pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to enable MSI support: %d\n", err);
+		goto put_resources;
+	}
+
+	pm_runtime_enable(pcie->dev);
+	err = pm_runtime_get_sync(pcie->dev);
+	if (err < 0) {
+		dev_err(dev, "fail to enable pcie controller: %d\n", err);
+		goto pm_runtime_put;
+	}
+
+	err = tegra_pcie_request_resources(pcie);
+	if (err)
+		goto pm_runtime_put;
+
+	host->busnr = pcie->busn.start;
+	host->dev.parent = &pdev->dev;
+	host->ops = &tegra_pcie_ops;
+	host->map_irq = tegra_pcie_map_irq;
+	host->swizzle_irq = pci_common_swizzle;
+
+	err = pci_scan_root_bus_bridge(host);
+	if (err < 0) {
+		dev_err(dev, "failed to register host: %d\n", err);
+		goto free_resources;
+	}
+
+	pci_bus_size_bridges(host->bus);
+	pci_bus_assign_resources(host->bus);
+
+	list_for_each_entry(child, &host->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(host->bus);
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_pcie_debugfs_init(pcie);
+		if (err < 0)
+			dev_err(dev, "failed to setup debugfs: %d\n", err);
+	}
+
+	return 0;
+
+free_resources:
+	tegra_pcie_free_resources(pcie);
+pm_runtime_put:
+	pm_runtime_put_sync(pcie->dev);
+	pm_runtime_disable(pcie->dev);
+	tegra_pcie_msi_teardown(pcie);
+put_resources:
+	tegra_pcie_put_resources(pcie);
+	return err;
+}
+
+static int tegra_pcie_remove(struct platform_device *pdev)
+{
+	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct tegra_pcie_port *port, *tmp;
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		tegra_pcie_debugfs_exit(pcie);
+
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	tegra_pcie_free_resources(pcie);
+	pm_runtime_put_sync(pcie->dev);
+	pm_runtime_disable(pcie->dev);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		tegra_pcie_msi_teardown(pcie);
+
+	tegra_pcie_put_resources(pcie);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		tegra_pcie_port_free(port);
+
+	return 0;
+}
+
+static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
+{
+	struct tegra_pcie *pcie = dev_get_drvdata(dev);
+	struct tegra_pcie_port *port;
+	int err;
+
+	list_for_each_entry(port, &pcie->ports, list)
+		tegra_pcie_pme_turnoff(port);
+
+	tegra_pcie_disable_ports(pcie);
+
+	/*
+	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
+	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
+	 */
+	tegra_pcie_disable_interrupts(pcie);
+
+	if (pcie->soc->program_uphy) {
+		err = tegra_pcie_phy_power_off(pcie);
+		if (err < 0)
+			dev_err(dev, "failed to power off PHY(s): %d\n", err);
+	}
+
+	reset_control_assert(pcie->pex_rst);
+	clk_disable_unprepare(pcie->pex_clk);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		tegra_pcie_disable_msi(pcie);
+
+	pinctrl_pm_select_idle_state(dev);
+	tegra_pcie_power_off(pcie);
+
+	return 0;
+}
+
+static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
+{
+	struct tegra_pcie *pcie = dev_get_drvdata(dev);
+	int err;
+
+	err = tegra_pcie_power_on(pcie);
+	if (err) {
+		dev_err(dev, "tegra pcie power on fail: %d\n", err);
+		return err;
+	}
+
+	err = pinctrl_pm_select_default_state(dev);
+	if (err < 0) {
+		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
+		goto poweroff;
+	}
+
+	tegra_pcie_enable_controller(pcie);
+	tegra_pcie_setup_translations(pcie);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		tegra_pcie_enable_msi(pcie);
+
+	err = clk_prepare_enable(pcie->pex_clk);
+	if (err) {
+		dev_err(dev, "failed to enable PEX clock: %d\n", err);
+		goto pex_dpd_enable;
+	}
+
+	reset_control_deassert(pcie->pex_rst);
+
+	if (pcie->soc->program_uphy) {
+		err = tegra_pcie_phy_power_on(pcie);
+		if (err < 0) {
+			dev_err(dev, "failed to power on PHY(s): %d\n", err);
+			goto disable_pex_clk;
+		}
+	}
+
+	tegra_pcie_apply_pad_settings(pcie);
+	tegra_pcie_enable_ports(pcie);
+
+	return 0;
+
+disable_pex_clk:
+	reset_control_assert(pcie->pex_rst);
+	clk_disable_unprepare(pcie->pex_clk);
+pex_dpd_enable:
+	pinctrl_pm_select_idle_state(dev);
+poweroff:
+	tegra_pcie_power_off(pcie);
+
+	return err;
+}
+
+static const struct dev_pm_ops tegra_pcie_pm_ops = {
+	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
+				      tegra_pcie_pm_resume)
+};
+
+static struct platform_driver tegra_pcie_driver = {
+	.driver = {
+		.name = "tegra-pcie",
+		.of_match_table = tegra_pcie_of_match,
+		.suppress_bind_attrs = true,
+		.pm = &tegra_pcie_pm_ops,
+	},
+	.probe = tegra_pcie_probe,
+	.remove = tegra_pcie_remove,
+};
+module_platform_driver(tegra_pcie_driver);
+MODULE_LICENSE("GPL");
diff --git a/marvell/linux/drivers/pci/controller/pci-thunder-ecam.c b/marvell/linux/drivers/pci/controller/pci-thunder-ecam.c
new file mode 100644
index 0000000..18715d2
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-thunder-ecam.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015, 2016 Cavium, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/of_pci.h>
+#include <linux/of.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+static void set_val(u32 v, int where, int size, u32 *val)
+{
+	int shift = (where & 3) * 8;
+
+	pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v);
+	v >>= shift;
+	if (size == 1)
+		v &= 0xff;
+	else if (size == 2)
+		v &= 0xffff;
+	*val = v;
+}
+
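+/*
+ * Example (illustrative): for a 2-byte read at where = 0x72 of the 32-bit
+ * register value 0xbc00a011, set_val() shifts right by 16 and masks with
+ * 0xffff, yielding *val = 0xbc00.
+ */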
+static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
+			 unsigned int devfn, int where, int size, u32 *val)
+{
+	void __iomem *addr;
+	u32 v;
+
+	/* Entries are 16-byte aligned; bits [3:2] select the word in entry */
+	int where_a = where & 0xc;
+
+	if (where_a == 0) {
+		set_val(e0, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	if (where_a == 0x4) {
+		addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		v = readl(addr);
+		v &= ~0xf;
+		v |= 2; /* EA entry-1. Base-L */
+		set_val(v, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	if (where_a == 0x8) {
+		u32 barl_orig;
+		u32 barl_rb;
+
+		addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
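+		/*
+		 * Classic BAR sizing probe: write all-ones, read back to see
+		 * which bits are settable, then restore the original value;
+		 * read-only low bits come back as zero.
+		 */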
+		barl_orig = readl(addr + 0);
+		writel(0xffffffff, addr + 0);
+		barl_rb = readl(addr + 0);
+		writel(barl_orig, addr + 0);
+		/* zeros in unsettable bits */
+		v = ~barl_rb & ~3;
+		v |= 0xc; /* EA entry-2. Offset-L */
+		set_val(v, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	if (where_a == 0xc) {
+		addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		v = readl(addr); /* EA entry-3. Base-H */
+		set_val(v, where, size, val);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
+				       int where, int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int where_a = where & ~3;
+	void __iomem *addr;
+	u32 node_bits;
+	u32 v;
+
+	/* EA Base[63:32] may be missing some bits ... */
+	switch (where_a) {
+	case 0xa8:
+	case 0xbc:
+	case 0xd0:
+	case 0xe4:
+		break;
+	default:
+		return pci_generic_config_read(bus, devfn, where, size, val);
+	}
+
+	addr = bus->ops->map_bus(bus, devfn, where_a);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	v = readl(addr);
+
+	/*
+	 * Bit 44 of the 64-bit Base must match the same bit in
+	 * the config space access window.  Since we are working with
+	 * the high-order 32 bits, shift everything down by 32 bits.
+	 */
+	node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
+
+	v |= node_bits;
+	set_val(v, where, size, val);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 *val)
+{
+	u32 v;
+	u32 vendor_device;
+	u32 class_rev;
+	void __iomem *addr;
+	int cfg_type;
+	int where_a = where & ~3;
+
+	addr = bus->ops->map_bus(bus, devfn, 0xc);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	v = readl(addr);
+
+	/* Check for non type-00 header */
+	cfg_type = (v >> 16) & 0x7f;
+
+	addr = bus->ops->map_bus(bus, devfn, 8);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	class_rev = readl(addr);
+	if (class_rev == 0xffffffff)
+		goto no_emulation;
+
+	if ((class_rev & 0xff) >= 8) {
+		/* Pass-2 handling */
+		if (cfg_type)
+			goto no_emulation;
+		return thunder_ecam_p2_config_read(bus, devfn, where,
+						   size, val);
+	}
+
+	/*
+	 * All BARs have fixed addresses specified by the EA
+	 * capability; they must return zero on read.
+	 */
+	if (cfg_type == 0 &&
+	    ((where >= 0x10 && where < 0x2c) ||
+	     (where >= 0x1a4 && where < 0x1bc))) {
+		/* BAR or SR-IOV BAR */
+		*val = 0;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	addr = bus->ops->map_bus(bus, devfn, 0);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	vendor_device = readl(addr);
+	if (vendor_device == 0xffffffff)
+		goto no_emulation;
+
+	pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n",
+		 vendor_device & 0xffff, vendor_device >> 16, class_rev,
+		 (unsigned) where, devfn);
+
+	/* Type-00 header: synthesize the EA capability chain */
+	if (cfg_type == 0) {
+		bool has_msix;
+		bool is_nic = (vendor_device == 0xa01e177d);
+		bool is_tns = (vendor_device == 0xa01f177d);
+
+		addr = bus->ops->map_bus(bus, devfn, 0x70);
+		if (!addr) {
+			*val = ~0;
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		}
+		/* E_CAP */
+		v = readl(addr);
+		has_msix = (v & 0xff00) != 0;
+
+		if (!has_msix && where_a == 0x70) {
+			v |= 0xbc00; /* next capability is EA at 0xbc */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xb0) {
+			addr = bus->ops->map_bus(bus, devfn, where_a);
+			if (!addr) {
+				*val = ~0;
+				return PCIBIOS_DEVICE_NOT_FOUND;
+			}
+			v = readl(addr);
+			if (v & 0xff00)
+				pr_err("Bad MSIX cap header: %08x\n", v);
+			v |= 0xbc00; /* next capability is EA at 0xbc */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xbc) {
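+			/*
+			 * Synthesize the EA capability header: capability ID
+			 * 0x14 (Enhanced Allocation), next pointer 0x00, and
+			 * the entry count in bits 21:16.
+			 */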
+			if (is_nic)
+				v = 0x40014; /* EA last in chain, 4 entries */
+			else if (is_tns)
+				v = 0x30014; /* EA last in chain, 3 entries */
+			else if (has_msix)
+				v = 0x20014; /* EA last in chain, 2 entries */
+			else
+				v = 0x10014; /* EA last in chain, 1 entry */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a >= 0xc0 && where_a < 0xd0)
+			/* EA entry-0. PP=0, BAR0 Size:3 */
+			return handle_ea_bar(0x80ff0003,
+					     0x10, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xd0 && where_a < 0xe0 && has_msix)
+			/* EA entry-1. PP=0, BAR4, Size:3 */
+			return handle_ea_bar(0x80ff0043,
+					     0x20, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xe0 && where_a < 0xf0 && is_tns)
+			/* EA entry-2. PP=0, BAR2, Size:3 */
+			return handle_ea_bar(0x80ff0023,
+					     0x18, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xe0 && where_a < 0xf0 && is_nic)
+			/* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */
+			return handle_ea_bar(0x80ff0493,
+					     0x1a4, bus, devfn, where,
+					     size, val);
+		if (where_a >= 0xf0 && where_a < 0x100 && is_nic)
+			/* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */
+			return handle_ea_bar(0x80ff04d3,
+					     0x1b4, bus, devfn, where,
+					     size, val);
+	} else if (cfg_type == 1) {
+		bool is_rsl_bridge = devfn == 0x08;
+		bool is_rad_bridge = devfn == 0xa0;
+		bool is_zip_bridge = devfn == 0xa8;
+		bool is_dfa_bridge = devfn == 0xb0;
+		bool is_nic_bridge = devfn == 0x10;
+
+		if (where_a == 0x70) {
+			addr = bus->ops->map_bus(bus, devfn, where_a);
+			if (!addr) {
+				*val = ~0;
+				return PCIBIOS_DEVICE_NOT_FOUND;
+			}
+			v = readl(addr);
+			if (v & 0xff00)
+				pr_err("Bad PCIe cap header: %08x\n", v);
+			v |= 0xbc00; /* next capability is EA at 0xbc */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xbc) {
+			if (is_nic_bridge)
+				v = 0x10014; /* EA last in chain, 1 entry */
+			else
+				v = 0x00014; /* EA last in chain, no entries */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xc0) {
+			if (is_rsl_bridge || is_nic_bridge)
+				v = 0x0101; /* subordinate:secondary = 1:1 */
+			else if (is_rad_bridge)
+				v = 0x0202; /* subordinate:secondary = 2:2 */
+			else if (is_zip_bridge)
+				v = 0x0303; /* subordinate:secondary = 3:3 */
+			else if (is_dfa_bridge)
+				v = 0x0404; /* subordinate:secondary = 4:4 */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xc4 && is_nic_bridge) {
+			/* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */
+			v = 0x80ff0564;
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xc8 && is_nic_bridge) {
+			v = 0x00000002; /* Base-L 64-bit */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xcc && is_nic_bridge) {
+			v = 0xfffffffe; /* MaxOffset-L 64-bit */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xd0 && is_nic_bridge) {
+			v = 0x00008430; /* NIC Base-H */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		if (where_a == 0xd4 && is_nic_bridge) {
+			v = 0x0000000f; /* MaxOffset-H */
+			set_val(v, where, size, val);
+			return PCIBIOS_SUCCESSFUL;
+		}
+	}
+no_emulation:
+	return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
+				     int where, int size, u32 val)
+{
+	/*
+	 * All BARs have fixed addresses; ignore BAR writes so they
+	 * don't get corrupted.
+	 */
+	if ((where >= 0x10 && where < 0x2c) ||
+	    (where >= 0x1a4 && where < 0x1bc))
+		/* BAR or SR-IOV BAR */
+		return PCIBIOS_SUCCESSFUL;
+
+	return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+struct pci_ecam_ops pci_thunder_ecam_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus        = pci_ecam_map_bus,
+		.read           = thunder_ecam_config_read,
+		.write          = thunder_ecam_config_write,
+	}
+};
+
+#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
+
+static const struct of_device_id thunder_ecam_of_match[] = {
+	{ .compatible = "cavium,pci-host-thunder-ecam" },
+	{ },
+};
+
+static int thunder_ecam_probe(struct platform_device *pdev)
+{
+	return pci_host_common_probe(pdev, &pci_thunder_ecam_ops);
+}
+
+static struct platform_driver thunder_ecam_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = thunder_ecam_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = thunder_ecam_probe,
+};
+builtin_platform_driver(thunder_ecam_driver);
+
+#endif
+#endif
diff --git a/marvell/linux/drivers/pci/controller/pci-thunder-pem.c b/marvell/linux/drivers/pci/controller/pci-thunder-pem.c
new file mode 100644
index 0000000..1650ec2
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-thunder-pem.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 - 2016 Cavium, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
+#define PEM_CFG_WR 0x28
+#define PEM_CFG_RD 0x30
+
+struct thunder_pem_pci {
+	u32		ea_entry[3];
+	void __iomem	*pem_reg_base;
+};
+
+static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 *val)
+{
+	u64 read_val, tmp_val;
+	struct pci_config_window *cfg = bus->sysdata;
+	struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
+
+	if (devfn != 0 || where >= 2048) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/*
+	 * 32-bit accesses only.  Write the address to the low order
+	 * bits of PEM_CFG_RD, then trigger the read by reading back.
+	 * The config data lands in the upper 32-bits of PEM_CFG_RD.
+	 */
+	read_val = where & ~3ull;
+	writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+	read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+	read_val >>= 32;
+
+	/*
+	 * The config space contains some garbage, fix it up.  Also
+	 * synthesize an EA capability for the BAR used by MSI-X.
+	 */
+	switch (where & ~3) {
+	case 0x40:
+		read_val &= 0xffff00ff;
+		read_val |= 0x00007000; /* Skip MSI CAP */
+		break;
+	case 0x70: /* Express Cap */
+		/*
+		 * Change PME interrupt to vector 2 on T88 where it
+		 * reads as 0, else leave it alone.
+		 */
+		if (!(read_val & (0x1f << 25)))
+			read_val |= (2u << 25);
+		break;
+	case 0xb0: /* MSI-X Cap */
+		/* TableSize=2 or 4, Next Cap is EA */
+		read_val &= 0xc00000ff;
+		/*
+		 * If the Express Cap (0x70) raw PME vector reads as 0, we are
+		 * on T88 and TableSize is reported as 4; otherwise TableSize
+		 * is 2.
+		 */
+		writeq(0x70, pem_pci->pem_reg_base + PEM_CFG_RD);
+		tmp_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+		tmp_val >>= 32;
+		if (!(tmp_val & (0x1f << 25)))
+			read_val |= 0x0003bc00;
+		else
+			read_val |= 0x0001bc00;
+		break;
+	case 0xb4:
+		/* Table offset=0, BIR=0 */
+		read_val = 0x00000000;
+		break;
+	case 0xb8:
+		/* BPA offset=0xf0000, BIR=0 */
+		read_val = 0x000f0000;
+		break;
+	case 0xbc:
+		/* EA, 1 entry, no next Cap */
+		read_val = 0x00010014;
+		break;
+	case 0xc0:
+		/* DW2 for type-1 */
+		read_val = 0x00000000;
+		break;
+	case 0xc4:
+		/* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */
+		read_val = 0x80ff0003;
+		break;
+	case 0xc8:
+		read_val = pem_pci->ea_entry[0];
+		break;
+	case 0xcc:
+		read_val = pem_pci->ea_entry[1];
+		break;
+	case 0xd0:
+		read_val = pem_pci->ea_entry[2];
+		break;
+	default:
+		break;
+	}
+	read_val >>= (8 * (where & 3));
+	switch (size) {
+	case 1:
+		read_val &= 0xff;
+		break;
+	case 2:
+		read_val &= 0xffff;
+		break;
+	default:
+		break;
+	}
+	*val = read_val;
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+
+	if (bus->number < cfg->busr.start ||
+	    bus->number > cfg->busr.end)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/*
+	 * The first device on the bus is the PEM PCIe bridge.
+	 * Special case its config access.
+	 */
+	if (bus->number == cfg->busr.start)
+		return thunder_pem_bridge_read(bus, devfn, where, size, val);
+
+	return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+/*
+ * Some of the w1c_bits below also include read-only or non-writable
+ * reserved bits, this makes the code simpler and is OK as the bits
+ * are not affected by writing zeros to them.
+ */
+static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned)
+{
+	u32 w1c_bits = 0;
+
+	switch (where_aligned) {
+	case 0x04: /* Command/Status */
+	case 0x1c: /* Base and I/O Limit/Secondary Status */
+		w1c_bits = 0xff000000;
+		break;
+	case 0x44: /* Power Management Control and Status */
+		w1c_bits = 0xfffffe00;
+		break;
+	case 0x78: /* Device Control/Device Status */
+	case 0x80: /* Link Control/Link Status */
+	case 0x88: /* Slot Control/Slot Status */
+	case 0x90: /* Root Status */
+	case 0xa0: /* Link Control 2 Registers/Link Status 2 */
+		w1c_bits = 0xffff0000;
+		break;
+	case 0x104: /* Uncorrectable Error Status */
+	case 0x110: /* Correctable Error Status */
+	case 0x130: /* Error Status */
+	case 0x160: /* Link Control 4 */
+		w1c_bits = 0xffffffff;
+		break;
+	default:
+		break;
+	}
+	return w1c_bits;
+}
+
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 thunder_pem_bridge_w1_bits(u64 where_aligned)
+{
+	u32 w1_bits;
+
+	switch (where_aligned) {
+	case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+		/* Force 32-bit I/O addressing. */
+		w1_bits = 0x0101;
+		break;
+	case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+		/* Force 64-bit addressing */
+		w1_bits = 0x00010001;
+		break;
+	default:
+		w1_bits = 0;
+		break;
+	}
+	return w1_bits;
+}
+
+static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
+	u64 write_val, read_val;
+	u64 where_aligned = where & ~3ull;
+	u32 mask = 0;
+
+	if (devfn != 0 || where >= 2048)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/*
+	 * 32-bit accesses only.  If the write is for a size smaller
+	 * than 32-bits, we must first read the 32-bit value and merge
+	 * in the desired bits and then write the whole 32-bits back
+	 * out.
+	 */
+	switch (size) {
+	case 1:
+		writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+		read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+		read_val >>= 32;
+		mask = ~(0xff << (8 * (where & 3)));
+		read_val &= mask;
+		val = (val & 0xff) << (8 * (where & 3));
+		val |= (u32)read_val;
+		break;
+	case 2:
+		writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
+		read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+		read_val >>= 32;
+		mask = ~(0xffff << (8 * (where & 3)));
+		read_val &= mask;
+		val = (val & 0xffff) << (8 * (where & 3));
+		val |= (u32)read_val;
+		break;
+	default:
+		break;
+	}
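+
+	/*
+	 * Example (illustrative): a one-byte write of 0x40 at where = 0x05
+	 * reads back the aligned dword, clears bits 15:8 (mask 0xffff00ff)
+	 * and merges in 0x40 << 8 before the full 32-bit write below.
+	 */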
+
+	/*
+	 * By expanding the write width to 32 bits, we may
+	 * inadvertently hit some W1C bits that were not intended to
+	 * be written.  Calculate the mask that must be applied to the
+	 * data to be written to avoid these cases.
+	 */
+	if (mask) {
+		u32 w1c_bits = thunder_pem_bridge_w1c_bits(where);
+
+		if (w1c_bits) {
+			mask &= w1c_bits;
+			val &= ~mask;
+		}
+	}
+
+	/*
+	 * Some bits must be read-only with value of one.  Since the
+	 * access method allows these to be cleared if a zero is
+	 * written, force them to one before writing.
+	 */
+	val |= thunder_pem_bridge_w1_bits(where_aligned);
+
+	/*
+	 * Low order bits are the config address, the high order 32
+	 * bits are the data to be written.
+	 */
+	write_val = (((u64)val) << 32) | where_aligned;
+	writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+
+	if (bus->number < cfg->busr.start ||
+	    bus->number > cfg->busr.end)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	/*
+	 * The first device on the bus is the PEM PCIe bridge.
+	 * Special case its config access.
+	 */
+	if (bus->number == cfg->busr.start)
+		return thunder_pem_bridge_write(bus, devfn, where, size, val);
+
+	return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+			    struct resource *res_pem)
+{
+	struct thunder_pem_pci *pem_pci;
+	resource_size_t bar4_start;
+
+	pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
+	if (!pem_pci)
+		return -ENOMEM;
+
+	pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
+	if (!pem_pci->pem_reg_base)
+		return -ENOMEM;
+
+	/*
+	 * The MSI-X BAR for the PEM and AER interrupts is located at
+	 * a fixed offset from the PEM register base.  Generate a
+	 * fragment of the synthesized Enhanced Allocation capability
+	 * structure here for the BAR.
+	 */
+	bar4_start = res_pem->start + 0xf00000;
+	pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
+	pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
+	pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
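+	/*
+	 * ea_entry[0..2] are returned verbatim as Base-L, MaxOffset-L and
+	 * Base-H from config offsets 0xc8, 0xcc and 0xd0 in
+	 * thunder_pem_bridge_read() above.
+	 */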
+
+	cfg->priv = pem_pci;
+	return 0;
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+#define PEM_RES_BASE		0x87e0c0000000ULL
+#define PEM_NODE_MASK		GENMASK_ULL(45, 44)
+#define PEM_INDX_MASK		GENMASK_ULL(26, 24)
+#define PEM_MIN_DOM_IN_NODE	4
+#define PEM_MAX_DOM_IN_NODE	10
+
+static void thunder_pem_reserve_range(struct device *dev, int seg,
+				      struct resource *r)
+{
+	resource_size_t start = r->start, end = r->end;
+	struct resource *res;
+	const char *regionid;
+
+	regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
+	if (!regionid)
+		return;
+
+	res = request_mem_region(start, end - start + 1, regionid);
+	if (res)
+		res->flags &= ~IORESOURCE_BUSY;
+	else
+		kfree(regionid);
+
+	dev_info(dev, "%pR %s reserved\n", r,
+		 res ? "has been" : "could not be");
+}
+
+static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
+				 struct resource *res_pem)
+{
+	int node = acpi_get_node(root->device->handle);
+	int index;
+
+	if (node == NUMA_NO_NODE)
+		node = 0;
+
+	index = root->segment - PEM_MIN_DOM_IN_NODE;
+	index -= node * PEM_MAX_DOM_IN_NODE;
+	res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
+					FIELD_PREP(PEM_INDX_MASK, index);
+	res_pem->flags = IORESOURCE_MEM;
+}
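+
+/*
+ * Example (illustrative): PCI segment 5 on node 0 gives index 1, so
+ * res_pem->start = 0x87e0c0000000 | FIELD_PREP(PEM_INDX_MASK, 1)
+ * = 0x87e0c1000000.
+ */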
+
+static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct resource *res_pem;
+	int ret;
+
+	res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL);
+	if (!res_pem)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
+
+	/*
+	 * If we fail to gather resources, we are running with old firmware
+	 * and need to calculate the PEM-specific resources manually.
+	 */
+	if (ret) {
+		thunder_pem_legacy_fw(root, res_pem);
+		/*
+		 * Reserve the 64K PEM-specific resource region. The full 16M
+		 * range is required for the thunder_pem_init() call.
+		 */
+		res_pem->end = res_pem->start + SZ_64K - 1;
+		thunder_pem_reserve_range(dev, root->segment, res_pem);
+		res_pem->end = res_pem->start + SZ_16M - 1;
+
+		/* Reserve PCI configuration space as well. */
+		thunder_pem_reserve_range(dev, root->segment, &cfg->res);
+	}
+
+	return thunder_pem_init(dev, cfg, res_pem);
+}
+
+struct pci_ecam_ops thunder_pem_ecam_ops = {
+	.bus_shift	= 24,
+	.init		= thunder_pem_acpi_init,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= thunder_pem_config_read,
+		.write		= thunder_pem_config_write,
+	}
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HOST_THUNDER_PEM
+
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res_pem;
+
+	if (!dev->of_node)
+		return -EINVAL;
+
+	/*
+	 * The second register range is the PEM bridge to the PCIe
+	 * bus.  It has a different config access method than those
+	 * devices behind the bridge.
+	 */
+	res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res_pem) {
+		dev_err(dev, "missing \"reg[1]\"property\n");
+		return -EINVAL;
+	}
+
+	return thunder_pem_init(dev, cfg, res_pem);
+}
+
+static struct pci_ecam_ops pci_thunder_pem_ops = {
+	.bus_shift	= 24,
+	.init		= thunder_pem_platform_init,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= thunder_pem_config_read,
+		.write		= thunder_pem_config_write,
+	}
+};
+
+static const struct of_device_id thunder_pem_of_match[] = {
+	{ .compatible = "cavium,pci-host-thunder-pem" },
+	{ },
+};
+
+static int thunder_pem_probe(struct platform_device *pdev)
+{
+	return pci_host_common_probe(pdev, &pci_thunder_pem_ops);
+}
+
+static struct platform_driver thunder_pem_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = thunder_pem_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = thunder_pem_probe,
+};
+builtin_platform_driver(thunder_pem_driver);
+
+#endif
+#endif
diff --git a/marvell/linux/drivers/pci/controller/pci-v3-semi.c b/marvell/linux/drivers/pci/controller/pci-v3-semi.c
new file mode 100644
index 0000000..9a86bb7
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-v3-semi.c
@@ -0,0 +1,963 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for V3 Semiconductor PCI Local Bus to PCI Bridge
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the code from arch/arm/mach-integrator/pci_v3.c
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
+ *
+ * Contributors to the old driver include:
+ * Russell King <linux@armlinux.org.uk>
+ * David A. Rusling <david.rusling@linaro.org> (uHAL, ARM Firmware suite)
+ * Rob Herring <robh@kernel.org>
+ * Liviu Dudau <Liviu.Dudau@arm.com>
+ * Grant Likely <grant.likely@secretlab.ca>
+ * Arnd Bergmann <arnd@arndb.de>
+ * Bjorn Helgaas <bhelgaas@google.com>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#include "../pci.h"
+
+#define V3_PCI_VENDOR			0x00000000
+#define V3_PCI_DEVICE			0x00000002
+#define V3_PCI_CMD			0x00000004
+#define V3_PCI_STAT			0x00000006
+#define V3_PCI_CC_REV			0x00000008
+#define V3_PCI_HDR_CFG			0x0000000C
+#define V3_PCI_IO_BASE			0x00000010
+#define V3_PCI_BASE0			0x00000014
+#define V3_PCI_BASE1			0x00000018
+#define V3_PCI_SUB_VENDOR		0x0000002C
+#define V3_PCI_SUB_ID			0x0000002E
+#define V3_PCI_ROM			0x00000030
+#define V3_PCI_BPARAM			0x0000003C
+#define V3_PCI_MAP0			0x00000040
+#define V3_PCI_MAP1			0x00000044
+#define V3_PCI_INT_STAT			0x00000048
+#define V3_PCI_INT_CFG			0x0000004C
+#define V3_LB_BASE0			0x00000054
+#define V3_LB_BASE1			0x00000058
+#define V3_LB_MAP0			0x0000005E
+#define V3_LB_MAP1			0x00000062
+#define V3_LB_BASE2			0x00000064
+#define V3_LB_MAP2			0x00000066
+#define V3_LB_SIZE			0x00000068
+#define V3_LB_IO_BASE			0x0000006E
+#define V3_FIFO_CFG			0x00000070
+#define V3_FIFO_PRIORITY		0x00000072
+#define V3_FIFO_STAT			0x00000074
+#define V3_LB_ISTAT			0x00000076
+#define V3_LB_IMASK			0x00000077
+#define V3_SYSTEM			0x00000078
+#define V3_LB_CFG			0x0000007A
+#define V3_PCI_CFG			0x0000007C
+#define V3_DMA_PCI_ADR0			0x00000080
+#define V3_DMA_PCI_ADR1			0x00000090
+#define V3_DMA_LOCAL_ADR0		0x00000084
+#define V3_DMA_LOCAL_ADR1		0x00000094
+#define V3_DMA_LENGTH0			0x00000088
+#define V3_DMA_LENGTH1			0x00000098
+#define V3_DMA_CSR0			0x0000008B
+#define V3_DMA_CSR1			0x0000009B
+#define V3_DMA_CTLB_ADR0		0x0000008C
+#define V3_DMA_CTLB_ADR1		0x0000009C
+#define V3_DMA_DELAY			0x000000E0
+#define V3_MAIL_DATA			0x000000C0
+#define V3_PCI_MAIL_IEWR		0x000000D0
+#define V3_PCI_MAIL_IERD		0x000000D2
+#define V3_LB_MAIL_IEWR			0x000000D4
+#define V3_LB_MAIL_IERD			0x000000D6
+#define V3_MAIL_WR_STAT			0x000000D8
+#define V3_MAIL_RD_STAT			0x000000DA
+#define V3_QBA_MAP			0x000000DC
+
+/* PCI STATUS bits */
+#define V3_PCI_STAT_PAR_ERR		BIT(15)
+#define V3_PCI_STAT_SYS_ERR		BIT(14)
+#define V3_PCI_STAT_M_ABORT_ERR		BIT(13)
+#define V3_PCI_STAT_T_ABORT_ERR		BIT(12)
+
+/* LB ISTAT bits */
+#define V3_LB_ISTAT_MAILBOX		BIT(7)
+#define V3_LB_ISTAT_PCI_RD		BIT(6)
+#define V3_LB_ISTAT_PCI_WR		BIT(5)
+#define V3_LB_ISTAT_PCI_INT		BIT(4)
+#define V3_LB_ISTAT_PCI_PERR		BIT(3)
+#define V3_LB_ISTAT_I2O_QWR		BIT(2)
+#define V3_LB_ISTAT_DMA1		BIT(1)
+#define V3_LB_ISTAT_DMA0		BIT(0)
+
+/* PCI COMMAND bits */
+#define V3_COMMAND_M_FBB_EN		BIT(9)
+#define V3_COMMAND_M_SERR_EN		BIT(8)
+#define V3_COMMAND_M_PAR_EN		BIT(6)
+#define V3_COMMAND_M_MASTER_EN		BIT(2)
+#define V3_COMMAND_M_MEM_EN		BIT(1)
+#define V3_COMMAND_M_IO_EN		BIT(0)
+
+/* SYSTEM bits */
+#define V3_SYSTEM_M_RST_OUT		BIT(15)
+#define V3_SYSTEM_M_LOCK		BIT(14)
+#define V3_SYSTEM_UNLOCK		0xa05f
+
+/* PCI CFG bits */
+#define V3_PCI_CFG_M_I2O_EN		BIT(15)
+#define V3_PCI_CFG_M_IO_REG_DIS		BIT(14)
+#define V3_PCI_CFG_M_IO_DIS		BIT(13)
+#define V3_PCI_CFG_M_EN3V		BIT(12)
+#define V3_PCI_CFG_M_RETRY_EN		BIT(10)
+#define V3_PCI_CFG_M_AD_LOW1		BIT(9)
+#define V3_PCI_CFG_M_AD_LOW0		BIT(8)
+/*
+ * This is the value applied to C/BE[3:1], with bit 0 always held 0
+ * during DMA access.
+ */
+#define V3_PCI_CFG_M_RTYPE_SHIFT	5
+#define V3_PCI_CFG_M_WTYPE_SHIFT	1
+#define V3_PCI_CFG_TYPE_DEFAULT		0x3
+
+/* PCI BASE bits (PCI -> Local Bus) */
+#define V3_PCI_BASE_M_ADR_BASE		0xFFF00000U
+#define V3_PCI_BASE_M_ADR_BASEL		0x000FFF00U
+#define V3_PCI_BASE_M_PREFETCH		BIT(3)
+#define V3_PCI_BASE_M_TYPE		(3 << 1)
+#define V3_PCI_BASE_M_IO		BIT(0)
+
+/* PCI MAP bits (PCI -> Local bus) */
+#define V3_PCI_MAP_M_MAP_ADR		0xFFF00000U
+#define V3_PCI_MAP_M_RD_POST_INH	BIT(15)
+#define V3_PCI_MAP_M_ROM_SIZE		(3 << 10)
+#define V3_PCI_MAP_M_SWAP		(3 << 8)
+#define V3_PCI_MAP_M_ADR_SIZE		0x000000F0U
+#define V3_PCI_MAP_M_REG_EN		BIT(1)
+#define V3_PCI_MAP_M_ENABLE		BIT(0)
+
+/* LB_BASE0,1 bits (Local bus -> PCI) */
+#define V3_LB_BASE_ADR_BASE		0xfff00000U
+#define V3_LB_BASE_SWAP			(3 << 8)
+#define V3_LB_BASE_ADR_SIZE		(15 << 4)
+#define V3_LB_BASE_PREFETCH		BIT(3)
+#define V3_LB_BASE_ENABLE		BIT(0)
+
+#define V3_LB_BASE_ADR_SIZE_1MB		(0 << 4)
+#define V3_LB_BASE_ADR_SIZE_2MB		(1 << 4)
+#define V3_LB_BASE_ADR_SIZE_4MB		(2 << 4)
+#define V3_LB_BASE_ADR_SIZE_8MB		(3 << 4)
+#define V3_LB_BASE_ADR_SIZE_16MB	(4 << 4)
+#define V3_LB_BASE_ADR_SIZE_32MB	(5 << 4)
+#define V3_LB_BASE_ADR_SIZE_64MB	(6 << 4)
+#define V3_LB_BASE_ADR_SIZE_128MB	(7 << 4)
+#define V3_LB_BASE_ADR_SIZE_256MB	(8 << 4)
+#define V3_LB_BASE_ADR_SIZE_512MB	(9 << 4)
+#define V3_LB_BASE_ADR_SIZE_1GB		(10 << 4)
+#define V3_LB_BASE_ADR_SIZE_2GB		(11 << 4)
+
+#define v3_addr_to_lb_base(a)	((a) & V3_LB_BASE_ADR_BASE)
+
+/* LB_MAP0,1 bits (Local bus -> PCI) */
+#define V3_LB_MAP_MAP_ADR		0xfff0U
+#define V3_LB_MAP_TYPE			(7 << 1)
+#define V3_LB_MAP_AD_LOW_EN		BIT(0)
+
+#define V3_LB_MAP_TYPE_IACK		(0 << 1)
+#define V3_LB_MAP_TYPE_IO		(1 << 1)
+#define V3_LB_MAP_TYPE_MEM		(3 << 1)
+#define V3_LB_MAP_TYPE_CONFIG		(5 << 1)
+#define V3_LB_MAP_TYPE_MEM_MULTIPLE	(6 << 1)
+
+#define v3_addr_to_lb_map(a)	(((a) >> 16) & V3_LB_MAP_MAP_ADR)
+
+/* LB_BASE2 bits (Local bus -> PCI IO) */
+#define V3_LB_BASE2_ADR_BASE		0xff00U
+#define V3_LB_BASE2_SWAP_AUTO		(3 << 6)
+#define V3_LB_BASE2_ENABLE		BIT(0)
+
+#define v3_addr_to_lb_base2(a)	(((a) >> 16) & V3_LB_BASE2_ADR_BASE)
+
+/* LB_MAP2 bits (Local bus -> PCI IO) */
+#define V3_LB_MAP2_MAP_ADR		0xff00U
+
+#define v3_addr_to_lb_map2(a)	(((a) >> 16) & V3_LB_MAP2_MAP_ADR)
+
+/* FIFO priority bits */
+#define V3_FIFO_PRIO_LOCAL		BIT(12)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB	BIT(10)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1	BIT(11)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY	(BIT(10)|BIT(11))
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB	BIT(8)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1	BIT(9)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY	(BIT(8)|BIT(9))
+#define V3_FIFO_PRIO_PCI		BIT(4)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB	BIT(2)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1	BIT(3)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY	(BIT(2)|BIT(3))
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB	BIT(0)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1	BIT(1)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY	(BIT(0)|BIT(1))
+
+/* Local bus configuration bits */
+#define V3_LB_CFG_LB_TO_64_CYCLES	0x0000
+#define V3_LB_CFG_LB_TO_256_CYCLES	BIT(13)
+#define V3_LB_CFG_LB_TO_512_CYCLES	BIT(14)
+#define V3_LB_CFG_LB_TO_1024_CYCLES	(BIT(13)|BIT(14))
+#define V3_LB_CFG_LB_RST		BIT(12)
+#define V3_LB_CFG_LB_PPC_RDY		BIT(11)
+#define V3_LB_CFG_LB_LB_INT		BIT(10)
+#define V3_LB_CFG_LB_ERR_EN		BIT(9)
+#define V3_LB_CFG_LB_RDY_EN		BIT(8)
+#define V3_LB_CFG_LB_BE_IMODE		BIT(7)
+#define V3_LB_CFG_LB_BE_OMODE		BIT(6)
+#define V3_LB_CFG_LB_ENDIAN		BIT(5)
+#define V3_LB_CFG_LB_PARK_EN		BIT(4)
+#define V3_LB_CFG_LB_FBB_DIS		BIT(2)
+
+/* ARM Integrator-specific extended control registers */
+#define INTEGRATOR_SC_PCI_OFFSET	0x18
+#define INTEGRATOR_SC_PCI_ENABLE	BIT(0)
+#define INTEGRATOR_SC_PCI_INTCLR	BIT(1)
+#define INTEGRATOR_SC_LBFADDR_OFFSET	0x20
+#define INTEGRATOR_SC_LBFCODE_OFFSET	0x24
+
+struct v3_pci {
+	struct device *dev;
+	void __iomem *base;
+	void __iomem *config_base;
+	struct pci_bus *bus;
+	u32 config_mem;
+	u32 io_mem;
+	u32 non_pre_mem;
+	u32 pre_mem;
+	phys_addr_t io_bus_addr;
+	phys_addr_t non_pre_bus_addr;
+	phys_addr_t pre_bus_addr;
+	struct regmap *map;
+};
+
+/*
+ * The V3 PCI interface chip in Integrator provides several windows from
+ * local bus memory into the PCI memory areas. Unfortunately, there
+ * are not really enough windows for our usage, therefore we reuse
+ * one of the windows for access to PCI configuration space. On the
+ * Integrator/AP, the memory map is as follows:
+ *
+ * Local Bus Memory         Usage
+ *
+ * 40000000 - 4FFFFFFF      PCI memory.  256M non-prefetchable
+ * 50000000 - 5FFFFFFF      PCI memory.  256M prefetchable
+ * 60000000 - 60FFFFFF      PCI IO.  16M
+ * 61000000 - 61FFFFFF      PCI Configuration. 16M
+ *
+ * There are three V3 windows, each described by a pair of V3 registers.
+ * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2.
+ * Base0 and Base1 can be used for any type of PCI memory access.   Base2
+ * can be used either for PCI I/O or for I2O accesses.  By default, uHAL
+ * uses this only for PCI IO space.
+ *
+ * Normally these spaces are mapped using the following base registers:
+ *
+ * Usage Local Bus Memory         Base/Map registers used
+ *
+ * Mem   40000000 - 4FFFFFFF      LB_BASE0/LB_MAP0
+ * Mem   50000000 - 5FFFFFFF      LB_BASE1/LB_MAP1
+ * IO    60000000 - 60FFFFFF      LB_BASE2/LB_MAP2
+ * Cfg   61000000 - 61FFFFFF
+ *
+ * This means that I2O and PCI configuration space accesses will fail.
+ * When PCI configuration accesses are needed (via the uHAL PCI
+ * configuration space primitives) we must remap the spaces as follows:
+ *
+ * Usage Local Bus Memory         Base/Map registers used
+ *
+ * Mem   40000000 - 4FFFFFFF      LB_BASE0/LB_MAP0
+ * Mem   50000000 - 5FFFFFFF      LB_BASE0/LB_MAP0
+ * IO    60000000 - 60FFFFFF      LB_BASE2/LB_MAP2
+ * Cfg   61000000 - 61FFFFFF      LB_BASE1/LB_MAP1
+ *
+ * To make this work, the code depends on overlapping windows working.
+ * The V3 chip translates an address by checking its range within
+ * each of the BASE/MAP pairs in turn (in ascending register number
+ * order).  It will use the first matching pair.   So, for example,
+ * if the same address is mapped by both LB_BASE0/LB_MAP0 and
+ * LB_BASE1/LB_MAP1, the V3 will use the translation from
+ * LB_BASE0/LB_MAP0.
+ *
+ * To allow PCI Configuration space access, the code enlarges the
+ * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M.  This occludes
+ * the window currently mapped by LB_BASE1/LB_MAP1 so that it can
+ * be remapped for use by configuration cycles.
+ *
+ * At the end of the PCI Configuration space accesses,
+ * LB_BASE1/LB_MAP1 is reset to map PCI Memory.  Finally the window
+ * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to
+ * reveal the now restored LB_BASE1/LB_MAP1 window.
+ *
+ * NOTE: We do not set up I2O mapping.  I suspect that this is only
+ * for an intelligent (target) device.  Using I2O disables most of
+ * the mappings into PCI memory.
+ */
+static void __iomem *v3_map_bus(struct pci_bus *bus,
+				unsigned int devfn, int offset)
+{
+	struct v3_pci *v3 = bus->sysdata;
+	unsigned int address, mapaddress, busnr;
+
+	busnr = bus->number;
+	if (busnr == 0) {
+		int slot = PCI_SLOT(devfn);
+
+		/*
+		 * local bus segment, so we need a type 0 config cycle
+		 *
+		 * build the PCI configuration "address" with one-hot in
+		 * A31-A11
+		 *
+		 * mapaddress:
+		 *  3:1 = config cycle (101)
+		 *  0   = PCI A1 & A0 are 0 (0)
+		 */
+		address = PCI_FUNC(devfn) << 8;
+		mapaddress = V3_LB_MAP_TYPE_CONFIG;
+
+		if (slot > 12)
+			/*
+			 * high order bits are handled by the MAP register
+			 */
+			mapaddress |= BIT(slot - 5);
+		else
+			/*
+			 * low order bits handled directly in the address
+			 */
+			address |= BIT(slot + 11);
+	} else {
+		/*
+		 * not the local bus segment, so we need a type 1 config cycle
+		 *
+		 * address:
+		 *  23:16 = bus number
+		 *  15:11 = slot number (7:3 of devfn)
+		 *  10:8  = func number (2:0 of devfn)
+		 *
+		 * mapaddress:
+		 *  3:1 = config cycle (101)
+		 *  0   = PCI A1 & A0 from host bus (1)
+		 */
+		mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN;
+		address = (busnr << 16) | (devfn << 8);
+	}
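+
+	/*
+	 * Example (illustrative): bus 0, slot 10, function 1 yields the
+	 * one-hot type 0 address BIT(10 + 11) | (1 << 8), i.e. AD21 selects
+	 * the device while AD[10:8] carry the function number.
+	 */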
+
+	/*
+	 * Set up base0 to see all 512Mbytes of memory space (not
+	 * prefetchable), this frees up base1 for re-use by
+	 * configuration memory
+	 */
+	writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+	       V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE,
+	       v3->base + V3_LB_BASE0);
+
+	/*
+	 * Set up base1/map1 to point into configuration space.
+	 * The config mem is always 16MB.
+	 */
+	writel(v3_addr_to_lb_base(v3->config_mem) |
+	       V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE,
+	       v3->base + V3_LB_BASE1);
+	writew(mapaddress, v3->base + V3_LB_MAP1);
+
+	return v3->config_base + address + offset;
+}
+
+static void v3_unmap_bus(struct v3_pci *v3)
+{
+	/*
+	 * Reassign base1 for use by prefetchable PCI memory
+	 */
+	writel(v3_addr_to_lb_base(v3->pre_mem) |
+	       V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH |
+	       V3_LB_BASE_ENABLE,
+	       v3->base + V3_LB_BASE1);
+	writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+	       V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+	       v3->base + V3_LB_MAP1);
+
+	/*
+	 * And shrink base0 back to a 256M window (NOTE: MAP0 already correct)
+	 */
+	writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+	       V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE,
+	       v3->base + V3_LB_BASE0);
+}
+
+static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn,
+			      int config, int size, u32 *value)
+{
+	struct v3_pci *v3 = bus->sysdata;
+	int ret;
+
+	dev_dbg(&bus->dev,
+		"[read]  slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+	ret = pci_generic_config_read(bus, fn, config, size, value);
+	v3_unmap_bus(v3);
+	return ret;
+}
+
+static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn,
+				    int config, int size, u32 value)
+{
+	struct v3_pci *v3 = bus->sysdata;
+	int ret;
+
+	dev_dbg(&bus->dev,
+		"[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+		PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+	ret = pci_generic_config_write(bus, fn, config, size, value);
+	v3_unmap_bus(v3);
+	return ret;
+}
+
+static struct pci_ops v3_pci_ops = {
+	.map_bus = v3_map_bus,
+	.read = v3_pci_read_config,
+	.write = v3_pci_write_config,
+};
+
+static irqreturn_t v3_irq(int irq, void *data)
+{
+	struct v3_pci *v3 = data;
+	struct device *dev = v3->dev;
+	u32 status;
+
+	status = readw(v3->base + V3_PCI_STAT);
+	if (status & V3_PCI_STAT_PAR_ERR)
+		dev_err(dev, "parity error interrupt\n");
+	if (status & V3_PCI_STAT_SYS_ERR)
+		dev_err(dev, "system error interrupt\n");
+	if (status & V3_PCI_STAT_M_ABORT_ERR)
+		dev_err(dev, "master abort error interrupt\n");
+	if (status & V3_PCI_STAT_T_ABORT_ERR)
+		dev_err(dev, "target abort error interrupt\n");
+	writew(status, v3->base + V3_PCI_STAT);
+
+	status = readb(v3->base + V3_LB_ISTAT);
+	if (status & V3_LB_ISTAT_MAILBOX)
+		dev_info(dev, "PCI mailbox interrupt\n");
+	if (status & V3_LB_ISTAT_PCI_RD)
+		dev_err(dev, "PCI target LB->PCI READ abort interrupt\n");
+	if (status & V3_LB_ISTAT_PCI_WR)
+		dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n");
+	if (status &  V3_LB_ISTAT_PCI_INT)
+		dev_info(dev, "PCI pin interrupt\n");
+	if (status & V3_LB_ISTAT_PCI_PERR)
+		dev_err(dev, "PCI parity error interrupt\n");
+	if (status & V3_LB_ISTAT_I2O_QWR)
+		dev_info(dev, "I2O inbound post queue interrupt\n");
+	if (status & V3_LB_ISTAT_DMA1)
+		dev_info(dev, "DMA channel 1 interrupt\n");
+	if (status & V3_LB_ISTAT_DMA0)
+		dev_info(dev, "DMA channel 0 interrupt\n");
+	/* Clear all possible interrupts on the local bus */
+	writeb(0, v3->base + V3_LB_ISTAT);
+	if (v3->map)
+		regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+			     INTEGRATOR_SC_PCI_ENABLE |
+			     INTEGRATOR_SC_PCI_INTCLR);
+
+	return IRQ_HANDLED;
+}
+
+static int v3_integrator_init(struct v3_pci *v3)
+{
+	unsigned int val;
+
+	v3->map =
+		syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon");
+	if (IS_ERR(v3->map)) {
+		dev_err(v3->dev, "no syscon\n");
+		return -ENODEV;
+	}
+
+	regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val);
+	/* Take the PCI bridge out of reset, clear IRQs */
+	regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+		     INTEGRATOR_SC_PCI_ENABLE |
+		     INTEGRATOR_SC_PCI_INTCLR);
+
+	if (!(val & INTEGRATOR_SC_PCI_ENABLE)) {
+		/* If we were in reset we need to sleep a bit */
+		msleep(230);
+
+		/* Set the physical base for the controller itself */
+		writel(0x6200, v3->base + V3_LB_IO_BASE);
+
+		/* Wait for the mailbox to settle after reset */
+		do {
+			writeb(0xaa, v3->base + V3_MAIL_DATA);
+			writeb(0x55, v3->base + V3_MAIL_DATA + 4);
+		} while (readb(v3->base + V3_MAIL_DATA) != 0xaa &&
+			 readb(v3->base + V3_MAIL_DATA) != 0x55);
+	}
+
+	dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n");
+
+	return 0;
+}
+
+static int v3_pci_setup_resource(struct v3_pci *v3,
+				 resource_size_t io_base,
+				 struct pci_host_bridge *host,
+				 struct resource_entry *win)
+{
+	struct device *dev = v3->dev;
+	struct resource *mem;
+	struct resource *io;
+	int ret;
+
+	switch (resource_type(win->res)) {
+	case IORESOURCE_IO:
+		io = win->res;
+		io->name = "V3 PCI I/O";
+		v3->io_mem = io_base;
+		v3->io_bus_addr = io->start - win->offset;
+		dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
+			io, &v3->io_bus_addr);
+		ret = devm_pci_remap_iospace(dev, io, io_base);
+		if (ret) {
+			dev_warn(dev,
+				 "error %d: failed to map resource %pR\n",
+				 ret, io);
+			return ret;
+		}
+		/* Setup window 2 - PCI I/O */
+		writel(v3_addr_to_lb_base2(v3->io_mem) |
+		       V3_LB_BASE2_ENABLE,
+		       v3->base + V3_LB_BASE2);
+		writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+		       v3->base + V3_LB_MAP2);
+		break;
+	case IORESOURCE_MEM:
+		mem = win->res;
+		if (mem->flags & IORESOURCE_PREFETCH) {
+			mem->name = "V3 PCI PRE-MEM";
+			v3->pre_mem = mem->start;
+			v3->pre_bus_addr = mem->start - win->offset;
+			dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n",
+				mem, &v3->pre_bus_addr);
+			if (resource_size(mem) != SZ_256M) {
+				dev_err(dev, "prefetchable memory range is not 256MB\n");
+				return -EINVAL;
+			}
+			if (v3->non_pre_mem &&
+			    (mem->start != v3->non_pre_mem + SZ_256M)) {
+				dev_err(dev,
+					"prefetchable memory is not adjacent to non-prefetchable memory\n");
+				return -EINVAL;
+			}
+			/* Setup window 1 - PCI prefetchable memory */
+			writel(v3_addr_to_lb_base(v3->pre_mem) |
+			       V3_LB_BASE_ADR_SIZE_256MB |
+			       V3_LB_BASE_PREFETCH |
+			       V3_LB_BASE_ENABLE,
+			       v3->base + V3_LB_BASE1);
+			writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+			       V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+			       v3->base + V3_LB_MAP1);
+		} else {
+			mem->name = "V3 PCI NON-PRE-MEM";
+			v3->non_pre_mem = mem->start;
+			v3->non_pre_bus_addr = mem->start - win->offset;
+			dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n",
+				mem, &v3->non_pre_bus_addr);
+			if (resource_size(mem) != SZ_256M) {
+				dev_err(dev,
+					"non-prefetchable memory range is not 256MB\n");
+				return -EINVAL;
+			}
+			/* Setup window 0 - PCI non-prefetchable memory */
+			writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+			       V3_LB_BASE_ADR_SIZE_256MB |
+			       V3_LB_BASE_ENABLE,
+			       v3->base + V3_LB_BASE0);
+			writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) |
+			       V3_LB_MAP_TYPE_MEM,
+			       v3->base + V3_LB_MAP0);
+		}
+		break;
+	case IORESOURCE_BUS:
+		dev_dbg(dev, "BUS %pR\n", win->res);
+		host->busnr = win->res->start;
+		break;
+	default:
+		dev_info(dev, "Unknown resource type %lu\n",
+			 resource_type(win->res));
+		break;
+	}
+
+	return 0;
+}
+
+static int v3_get_dma_range_config(struct v3_pci *v3,
+				   struct of_pci_range *range,
+				   u32 *pci_base, u32 *pci_map)
+{
+	struct device *dev = v3->dev;
+	u64 cpu_end = range->cpu_addr + range->size - 1;
+	u64 pci_end = range->pci_addr + range->size - 1;
+	u32 val;
+
+	if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+		dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
+		return -EINVAL;
+	}
+	val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+	*pci_base = val;
+
+	if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+		dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
+		return -EINVAL;
+	}
+	val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+
+	switch (range->size) {
+	case SZ_1M:
+		val |= V3_LB_BASE_ADR_SIZE_1MB;
+		break;
+	case SZ_2M:
+		val |= V3_LB_BASE_ADR_SIZE_2MB;
+		break;
+	case SZ_4M:
+		val |= V3_LB_BASE_ADR_SIZE_4MB;
+		break;
+	case SZ_8M:
+		val |= V3_LB_BASE_ADR_SIZE_8MB;
+		break;
+	case SZ_16M:
+		val |= V3_LB_BASE_ADR_SIZE_16MB;
+		break;
+	case SZ_32M:
+		val |= V3_LB_BASE_ADR_SIZE_32MB;
+		break;
+	case SZ_64M:
+		val |= V3_LB_BASE_ADR_SIZE_64MB;
+		break;
+	case SZ_128M:
+		val |= V3_LB_BASE_ADR_SIZE_128MB;
+		break;
+	case SZ_256M:
+		val |= V3_LB_BASE_ADR_SIZE_256MB;
+		break;
+	case SZ_512M:
+		val |= V3_LB_BASE_ADR_SIZE_512MB;
+		break;
+	case SZ_1G:
+		val |= V3_LB_BASE_ADR_SIZE_1GB;
+		break;
+	case SZ_2G:
+		val |= V3_LB_BASE_ADR_SIZE_2GB;
+		break;
+	default:
+		dev_err(v3->dev, "illegal dma memory chunk size\n");
+		return -EINVAL;
+	}
+	val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
+	*pci_map = val;
+
+	dev_dbg(dev,
+		"DMA MEM CPU: 0x%016llx -> 0x%016llx => "
+		"PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
+		range->cpu_addr, cpu_end,
+		range->pci_addr, pci_end,
+		*pci_base, *pci_map);
+
+	return 0;
+}
+
+static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
+				       struct device_node *np)
+{
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	struct device *dev = v3->dev;
+	int i = 0;
+
+	if (of_pci_dma_range_parser_init(&parser, np)) {
+		dev_err(dev, "missing dma-ranges property\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Get the dma-ranges from the device tree
+	 */
+	for_each_of_pci_range(&parser, &range) {
+		int ret;
+		u32 pci_base, pci_map;
+
+		ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+		if (ret)
+			return ret;
+
+		if (i == 0) {
+			writel(pci_base, v3->base + V3_PCI_BASE0);
+			writel(pci_map, v3->base + V3_PCI_MAP0);
+		} else if (i == 1) {
+			writel(pci_base, v3->base + V3_PCI_BASE1);
+			writel(pci_map, v3->base + V3_PCI_MAP1);
+		} else {
+			dev_err(dev, "too many ranges, only two supported\n");
+			dev_err(dev, "range %d ignored\n", i);
+		}
+		i++;
+	}
+	return 0;
+}
+
+static int v3_pci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	resource_size_t io_base;
+	struct resource *regs;
+	struct resource_entry *win;
+	struct v3_pci *v3;
+	struct pci_host_bridge *host;
+	struct clk *clk;
+	u16 val;
+	int irq;
+	int ret;
+	LIST_HEAD(res);
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*v3));
+	if (!host)
+		return -ENOMEM;
+
+	host->dev.parent = dev;
+	host->ops = &v3_pci_ops;
+	host->busnr = 0;
+	host->msi = NULL;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	v3 = pci_host_bridge_priv(host);
+	host->sysdata = v3;
+	v3->dev = dev;
+
+	/* Get and enable host clock */
+	clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(dev, "clock not found\n");
+		return PTR_ERR(clk);
+	}
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(dev, "unable to enable clock\n");
+		return ret;
+	}
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	v3->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(v3->base))
+		return PTR_ERR(v3->base);
+	/*
+	 * The hardware has a register with the physical base address
+	 * of the V3 controller itself; verify that this is the same
+	 * as the physical memory we've remapped it from.
+	 */
+	if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16))
+		dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n",
+			readl(v3->base + V3_LB_IO_BASE), regs);
+
+	/* Configuration space is 16MB directly mapped */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (resource_size(regs) != SZ_16M) {
+		dev_err(dev, "config mem is not 16MB!\n");
+		return -EINVAL;
+	}
+	v3->config_mem = regs->start;
+	v3->config_base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(v3->config_base))
+		return PTR_ERR(v3->config_base);
+
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &io_base);
+	if (ret)
+		return ret;
+
+	ret = devm_request_pci_bus_resources(dev, &res);
+	if (ret)
+		return ret;
+
+	/* Get and request error IRQ resource */
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "unable to obtain PCIv3 error IRQ\n");
+		return -ENODEV;
+	}
+	ret = devm_request_irq(dev, irq, v3_irq, 0,
+			"PCIv3 error", v3);
+	if (ret < 0) {
+		dev_err(dev,
+			"unable to request PCIv3 error IRQ %d (%d)\n",
+			irq, ret);
+		return ret;
+	}
+
+	/*
+	 * Unlock V3 registers, but only if they were previously locked.
+	 */
+	if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK)
+		writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM);
+
+	/* Disable all slave access while we set up the windows */
+	val = readw(v3->base + V3_PCI_CMD);
+	val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+	writew(val, v3->base + V3_PCI_CMD);
+
+	/* Put the PCI bus into reset */
+	val = readw(v3->base + V3_SYSTEM);
+	val &= ~V3_SYSTEM_M_RST_OUT;
+	writew(val, v3->base + V3_SYSTEM);
+
+	/* Retry until we're ready */
+	val = readw(v3->base + V3_PCI_CFG);
+	val |= V3_PCI_CFG_M_RETRY_EN;
+	writew(val, v3->base + V3_PCI_CFG);
+
+	/* Set up the local bus protocol */
+	val = readw(v3->base + V3_LB_CFG);
+	val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */
+	val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */
+	val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */
+	val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */
+	writew(val, v3->base + V3_LB_CFG);
+
+	/* Enable the PCI bus master */
+	val = readw(v3->base + V3_PCI_CMD);
+	val |= PCI_COMMAND_MASTER;
+	writew(val, v3->base + V3_PCI_CMD);
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry(win, &res) {
+		ret = v3_pci_setup_resource(v3, io_base, host, win);
+		if (ret) {
+			dev_err(dev, "error setting up resources\n");
+			return ret;
+		}
+	}
+	ret = v3_pci_parse_map_dma_ranges(v3, np);
+	if (ret)
+		return ret;
+
+	/*
+	 * Disable PCI to host IO cycles, enable I/O buffers @3.3V,
+	 * set AD_LOW0 to 1 if one of the LB_MAP registers chooses
+	 * to use this (should be unused).
+	 */
+	writel(0x00000000, v3->base + V3_PCI_IO_BASE);
+	val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS |
+		V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0;
+	/*
+	 * DMA read and write command types driven on the PCI bus
+	 */
+	val |=  V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT;
+	val |=  V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT;
+	writew(val, v3->base + V3_PCI_CFG);
+
+	/*
+	 * Set the V3 FIFO such that writes have higher priority than
+	 * reads, and a local bus write causes a local bus read FIFO flush
+	 * on aperture 1. Same for PCI.
+	 */
+	writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 |
+	       V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 |
+	       V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 |
+	       V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1,
+	       v3->base + V3_FIFO_PRIORITY);
+
+	/*
+	 * Clear any error interrupts, and enable parity and write error
+	 * interrupts
+	 */
+	writeb(0, v3->base + V3_LB_ISTAT);
+	val = readw(v3->base + V3_LB_CFG);
+	val |= V3_LB_CFG_LB_LB_INT;
+	writew(val, v3->base + V3_LB_CFG);
+	writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+	       v3->base + V3_LB_IMASK);
+
+	/* Special Integrator initialization */
+	if (of_device_is_compatible(np, "arm,integrator-ap-pci")) {
+		ret = v3_integrator_init(v3);
+		if (ret)
+			return ret;
+	}
+
+	/* Post-init: enable PCI memory and invalidate (master already on) */
+	val = readw(v3->base + V3_PCI_CMD);
+	val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE;
+	writew(val, v3->base + V3_PCI_CMD);
+
+	/* Clear pending interrupts */
+	writeb(0, v3->base + V3_LB_ISTAT);
+	/* Read or write errors and parity errors cause interrupts */
+	writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+	       v3->base + V3_LB_IMASK);
+
+	/* Take the PCI bus out of reset so devices can initialize */
+	val = readw(v3->base + V3_SYSTEM);
+	val |= V3_SYSTEM_M_RST_OUT;
+	writew(val, v3->base + V3_SYSTEM);
+
+	/*
+	 * Re-lock the system register.
+	 */
+	val = readw(v3->base + V3_SYSTEM);
+	val |= V3_SYSTEM_M_LOCK;
+	writew(val, v3->base + V3_SYSTEM);
+
+	list_splice_init(&res, &host->windows);
+	ret = pci_scan_root_bus_bridge(host);
+	if (ret) {
+		dev_err(dev, "failed to register host: %d\n", ret);
+		return ret;
+	}
+	v3->bus = host->bus;
+
+	pci_bus_assign_resources(v3->bus);
+	pci_bus_add_devices(v3->bus);
+
+	return 0;
+}
+
+static const struct of_device_id v3_pci_of_match[] = {
+	{
+		.compatible = "v3,v360epc-pci",
+	},
+	{},
+};
+
+static struct platform_driver v3_pci_driver = {
+	.driver = {
+		.name = "pci-v3-semi",
+		.of_match_table = of_match_ptr(v3_pci_of_match),
+		.suppress_bind_attrs = true,
+	},
+	.probe  = v3_pci_probe,
+};
+builtin_platform_driver(v3_pci_driver);
diff --git a/marvell/linux/drivers/pci/controller/pci-versatile.c b/marvell/linux/drivers/pci/controller/pci-versatile.c
new file mode 100644
index 0000000..f59ad27
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-versatile.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2004 Koninklijke Philips Electronics NV
+ *
+ * Conversion to platform driver and DT:
+ * Copyright 2014 Linaro Ltd.
+ *
+ * 14/04/2005 Initial version, colin.king@philips.com
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+static void __iomem *versatile_pci_base;
+static void __iomem *versatile_cfg_base[2];
+
+#define PCI_IMAP(m)		(versatile_pci_base + ((m) * 4))
+#define PCI_SMAP(m)		(versatile_pci_base + 0x14 + ((m) * 4))
+#define PCI_SELFID		(versatile_pci_base + 0xc)
+
+#define VP_PCI_DEVICE_ID		0x030010ee
+#define VP_PCI_CLASS_ID			0x0b400000
+
+static u32 pci_slot_ignore;
+
+static int __init versatile_pci_slot_ignore(char *str)
+{
+	int retval;
+	int slot;
+
+	while ((retval = get_option(&str, &slot))) {
+		if ((slot < 0) || (slot > 31))
+			pr_err("Illegal slot value: %d\n", slot);
+		else
+			pci_slot_ignore |= (1 << slot);
+	}
+	return 1;
+}
+__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
+
+
+static void __iomem *versatile_map_bus(struct pci_bus *bus,
+				       unsigned int devfn, int offset)
+{
+	unsigned int busnr = bus->number;
+
+	if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
+		return NULL;
+
+	return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
+}
+
+static struct pci_ops pci_versatile_ops = {
+	.map_bus = versatile_map_bus,
+	.read	= pci_generic_config_read32,
+	.write	= pci_generic_config_write,
+};
+
+static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
+						     struct list_head *res)
+{
+	int err, mem = 1, res_valid = 0;
+	resource_size_t iobase;
+	struct resource_entry *win, *tmp;
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
+	if (err)
+		return err;
+
+	err = devm_request_pci_bus_resources(dev, res);
+	if (err)
+		goto out_release_res;
+
+	resource_list_for_each_entry_safe(win, tmp, res) {
+		struct resource *res = win->res;
+
+		switch (resource_type(res)) {
+		case IORESOURCE_IO:
+			err = devm_pci_remap_iospace(dev, res, iobase);
+			if (err) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 err, res);
+				resource_list_destroy_entry(win);
+			}
+			break;
+		case IORESOURCE_MEM:
+			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+
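+			/*
+			 * Only address bits 31:28 are programmed into the
+			 * inbound/system map registers, hence the >> 28
+			 * shifts (256MB window granularity).
+			 */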
+			writel(res->start >> 28, PCI_IMAP(mem));
+			writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
+			mem++;
+
+			break;
+		}
+	}
+
+	if (res_valid)
+		return 0;
+
+	dev_err(dev, "non-prefetchable memory resource required\n");
+	err = -EINVAL;
+
+out_release_res:
+	pci_free_resource_list(res);
+	return err;
+}
+
+static int versatile_pci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret, i, myslot = -1;
+	u32 val;
+	void __iomem *local_pci_cfg_base;
+	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
+	LIST_HEAD(pci_res);
+
+	bridge = devm_pci_alloc_host_bridge(dev, 0);
+	if (!bridge)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	versatile_pci_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(versatile_pci_base))
+		return PTR_ERR(versatile_pci_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	versatile_cfg_base[0] = devm_ioremap_resource(dev, res);
+	if (IS_ERR(versatile_cfg_base[0]))
+		return PTR_ERR(versatile_cfg_base[0]);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(versatile_cfg_base[1]))
+		return PTR_ERR(versatile_cfg_base[1]);
+
+	ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
+	if (ret)
+		return ret;
+
+	/*
+	 * We need to discover the PCI core first so that it can configure
+	 * itself before the main PCI probing is performed
+	 */
+	for (i = 0; i < 32; i++) {
+		if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
+		    (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
+			myslot = i;
+			break;
+		}
+	}
+	if (myslot == -1) {
+		dev_err(dev, "Cannot find PCI core!\n");
+		return -EIO;
+	}
+	/*
+	 * Do not map the Versatile FPGA PCI device into memory space
+	 */
+	pci_slot_ignore |= (1 << myslot);
+
+	dev_info(dev, "PCI core found (slot %d)\n", myslot);
+
+	writel(myslot, PCI_SELFID);
+	local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
+
+	val = readl(local_pci_cfg_base + PCI_COMMAND);
+	val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+	writel(val, local_pci_cfg_base + PCI_COMMAND);
+
+	/*
+	 * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+	 */
+	writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+	writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+	writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+	/*
+	 * For many years the kernel and QEMU were symbiotically buggy
+	 * in that they both assumed the same broken IRQ mapping.
+	 * QEMU therefore attempts to auto-detect old broken kernels
+	 * so that they still work on newer QEMU as they did on old
+	 * QEMU. Since we now use the correct (i.e. matching-hardware)
+	 * IRQ mapping we write a definitely different value to a
+	 * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+	 * real hardware behaviour and it need not be backwards
+	 * compatible for us. This write is harmless on real hardware.
+	 */
+	writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
+
+	pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
+	pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+	list_splice_init(&pci_res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = NULL;
+	bridge->busnr = 0;
+	bridge->ops = &pci_versatile_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
+		return ret;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bus);
+
+	return 0;
+}
+
+static const struct of_device_id versatile_pci_of_match[] = {
+	{ .compatible = "arm,versatile-pci", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
+
+static struct platform_driver versatile_pci_driver = {
+	.driver = {
+		.name = "versatile-pci",
+		.of_match_table = versatile_pci_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = versatile_pci_probe,
+};
+module_platform_driver(versatile_pci_driver);
+
+MODULE_DESCRIPTION("Versatile PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pci-xgene-msi.c b/marvell/linux/drivers/pci/controller/pci-xgene-msi.c
new file mode 100644
index 0000000..0bfa506
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-xgene-msi.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * APM X-Gene MSI Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Tanmay Inamdar <tinamdar@apm.com>
+ *	   Duc Dang <dhdang@apm.com>
+ */
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_pci.h>
+
+#define MSI_IR0			0x000000
+#define MSI_INT0		0x800000
+#define IDX_PER_GROUP		8
+#define IRQS_PER_IDX		16
+#define NR_HW_IRQS		16
+#define NR_MSI_VEC		(IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+
+struct xgene_msi_group {
+	struct xgene_msi	*msi;
+	int			gic_irq;
+	u32			msi_grp;
+};
+
+struct xgene_msi {
+	struct device_node	*node;
+	struct irq_domain	*inner_domain;
+	struct irq_domain	*msi_domain;
+	u64			msi_addr;
+	void __iomem		*msi_regs;
+	unsigned long		*bitmap;
+	struct mutex		bitmap_lock;
+	struct xgene_msi_group	*msi_groups;
+	int			num_cpus;
+};
+
+/* Global data */
+static struct xgene_msi xgene_msi_ctrl;
+
+static struct irq_chip xgene_msi_top_irq_chip = {
+	.name		= "X-Gene1 MSI",
+	.irq_enable	= pci_msi_unmask_irq,
+	.irq_disable	= pci_msi_mask_irq,
+	.irq_mask	= pci_msi_mask_irq,
+	.irq_unmask	= pci_msi_unmask_irq,
+};
+
+static struct  msi_domain_info xgene_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		  MSI_FLAG_PCI_MSIX),
+	.chip	= &xgene_msi_top_irq_chip,
+};
+
+/*
+ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
+ * n is the group number (0..F) and x is the register index within each
+ * group (0..7).
+ * The register layout is as follows:
+ * MSI0IR0			base_addr
+ * MSI0IR1			base_addr +  0x10000
+ * ...				...
+ * MSI0IR6			base_addr +  0x60000
+ * MSI0IR7			base_addr +  0x70000
+ * MSI1IR0			base_addr +  0x80000
+ * MSI1IR1			base_addr +  0x90000
+ * ...				...
+ * MSI1IR7			base_addr +  0xF0000
+ * MSI2IR0			base_addr + 0x100000
+ * ...				...
+ * MSIFIR0			base_addr + 0x780000
+ * MSIFIR1			base_addr + 0x790000
+ * ...				...
+ * MSIFIR7			base_addr + 0x7F0000
+ * MSIINT0			base_addr + 0x800000
+ * MSIINT1			base_addr + 0x810000
+ * ...				...
+ * MSIINTF			base_addr + 0x8F0000
+ *
+ * Each index register supports 16 MSI vectors (0..15) to generate interrupts.
+ * There are a total of 16 GIC IRQs assigned to these 16 groups of MSI
+ * termination registers.
+ *
+ * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
+ * the MSI pending status caused by 1 of its 8 index registers.
+ */
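+
+/*
+ * Worked example (illustrative): for group n = 3 and index x = 5, the
+ * MSI3IR5 register lives at base_addr + (3 << 19) + (5 << 16), i.e.
+ * base_addr + 0x180000 + 0x50000 = base_addr + 0x1d0000, matching the
+ * address computation in xgene_msi_ir_read() below.
+ */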
+
+/* MSInIRx read helper */
+static u32 xgene_msi_ir_read(struct xgene_msi *msi,
+				    u32 msi_grp, u32 msir_idx)
+{
+	return readl_relaxed(msi->msi_regs + MSI_IR0 +
+			      (msi_grp << 19) + (msir_idx << 16));
+}
+
+/* MSIINTn read helper */
+static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
+{
+	return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+}
+
+/*
+ * With 2048 MSI vectors supported, the MSI message can be constructed using
+ * following scheme:
+ * - Divide into 8 256-vector groups
+ *		Group 0: 0-255
+ *		Group 1: 256-511
+ *		Group 2: 512-767
+ *		...
+ *		Group 7: 1792-2047
+ * - Each 256-vector group is divided into 16 16-vector groups
+ *	For example, the 16 16-vector groups for 256-vector group 0-255 are:
+ *		Group 0: 0-15
+ *		Group 1: 16-31
+ *		...
+ *		Group 15: 240-255
+ * - The termination address of an MSI vector in 256-vector group n and
+ *   16-vector group x is the address of MSIxIRn
+ * - The data for an MSI vector in 16-vector group x is x
+ */
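+
+/*
+ * Worked example (illustrative): hwirq 1234 gives reg_set = 1234 / 256 = 4,
+ * group = 1234 % 16 = 2 and data = (1234 / 16) % 16 = 13, so the message
+ * targets msi_addr + ((8 * 2 + 4) << 16) = msi_addr + 0x140000 (MSI2IR4)
+ * with payload 13.
+ */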
+static u32 hwirq_to_reg_set(unsigned long hwirq)
+{
+	return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
+}
+
+static u32 hwirq_to_group(unsigned long hwirq)
+{
+	return (hwirq % NR_HW_IRQS);
+}
+
+static u32 hwirq_to_msi_data(unsigned long hwirq)
+{
+	return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+}
+
+static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
+	u32 reg_set = hwirq_to_reg_set(data->hwirq);
+	u32 group = hwirq_to_group(data->hwirq);
+	u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+
+	msg->address_hi = upper_32_bits(target_addr);
+	msg->address_lo = lower_32_bits(target_addr);
+	msg->data = hwirq_to_msi_data(data->hwirq);
+}
+
+/*
+ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.  To maintain
+ * the expected behaviour of .set_affinity for each MSI interrupt, the 16
+ * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
+ * for each core).  The MSI vector is moved from 1 MSI GIC IRQ to another
+ * MSI GIC IRQ to steer its MSI interrupt to the correct X-Gene v1 core.  As a
+ * consequence, the total MSI vectors that X-Gene v1 supports will be
+ * reduced to 256 (2048/8) vectors.
+ */
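+
+/*
+ * Worked example (illustrative): with num_cpus = 8, hwirq 19 is serviced by
+ * CPU 19 % 8 = 3, and its canonical hwirq (as seen from CPU0) is
+ * 19 - 3 = 16, which is what the bitmap allocator tracks.
+ */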
+static int hwirq_to_cpu(unsigned long hwirq)
+{
+	return (hwirq % xgene_msi_ctrl.num_cpus);
+}
+
+static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
+{
+	return (hwirq - hwirq_to_cpu(hwirq));
+}
+
+static int xgene_msi_set_affinity(struct irq_data *irqdata,
+				  const struct cpumask *mask, bool force)
+{
+	int target_cpu = cpumask_first(mask);
+	int curr_cpu;
+
+	curr_cpu = hwirq_to_cpu(irqdata->hwirq);
+	if (curr_cpu == target_cpu)
+		return IRQ_SET_MASK_OK_DONE;
+
+	/* Update MSI number to target the new CPU */
+	irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
+
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip xgene_msi_bottom_irq_chip = {
+	.name			= "MSI",
+	.irq_set_affinity       = xgene_msi_set_affinity,
+	.irq_compose_msi_msg	= xgene_compose_msi_msg,
+};
+
+static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				  unsigned int nr_irqs, void *args)
+{
+	struct xgene_msi *msi = domain->host_data;
+	int msi_irq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
+					     msi->num_cpus, 0);
+	if (msi_irq < NR_MSI_VEC)
+		bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
+	else
+		msi_irq = -ENOSPC;
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	if (msi_irq < 0)
+		return msi_irq;
+
+	irq_domain_set_info(domain, virq, msi_irq,
+			    &xgene_msi_bottom_irq_chip, domain->host_data,
+			    handle_simple_irq, NULL, NULL);
+
+	return 0;
+}
+
+static void xgene_irq_domain_free(struct irq_domain *domain,
+				  unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
+	u32 hwirq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	hwirq = hwirq_to_canonical_hwirq(d->hwirq);
+	bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc  = xgene_irq_domain_alloc,
+	.free   = xgene_irq_domain_free,
+};
+
+static int xgene_allocate_domains(struct xgene_msi *msi)
+{
+	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+						  &msi_domain_ops, msi);
+	if (!msi->inner_domain)
+		return -ENOMEM;
+
+	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
+						    &xgene_msi_domain_info,
+						    msi->inner_domain);
+
+	if (!msi->msi_domain) {
+		irq_domain_remove(msi->inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void xgene_free_domains(struct xgene_msi *msi)
+{
+	if (msi->msi_domain)
+		irq_domain_remove(msi->msi_domain);
+	if (msi->inner_domain)
+		irq_domain_remove(msi->inner_domain);
+}
+
+static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
+{
+	int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
+
+	xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
+	if (!xgene_msi->bitmap)
+		return -ENOMEM;
+
+	mutex_init(&xgene_msi->bitmap_lock);
+
+	xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
+					sizeof(struct xgene_msi_group),
+					GFP_KERNEL);
+	if (!xgene_msi->msi_groups)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void xgene_msi_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct xgene_msi_group *msi_groups;
+	struct xgene_msi *xgene_msi;
+	unsigned int virq;
+	int msir_index, msir_val, hw_irq;
+	u32 intr_index, grp_select, msi_grp;
+
+	chained_irq_enter(chip, desc);
+
+	msi_groups = irq_desc_get_handler_data(desc);
+	xgene_msi = msi_groups->msi;
+	msi_grp = msi_groups->msi_grp;
+
+	/*
+	 * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt.
+	 * If bit x of this register is set (x is 0..7), one or more interrupts
+	 * corresponding to MSInIRx are pending.
+	 */
+	grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+	while (grp_select) {
+		msir_index = ffs(grp_select) - 1;
+		/*
+		 * Calculate MSInIRx address to read to check for interrupts
+		 * (refer to termination address and data assignment
+		 * described in xgene_compose_msi_msg())
+		 */
+		msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
+		while (msir_val) {
+			intr_index = ffs(msir_val) - 1;
+			/*
+			 * Calculate MSI vector number (refer to the termination
+			 * address and data assignment described in
+			 * xgene_compose_msi_msg function)
+			 */
+			hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
+				 NR_HW_IRQS) + msi_grp;
+			/*
+			 * As we have multiple hw_irq that maps to single MSI,
+			 * always look up the virq using the hw_irq as seen from
+			 * CPU0
+			 */
+			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
+			virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
+			WARN_ON(!virq);
+			if (virq != 0)
+				generic_handle_irq(virq);
+			msir_val &= ~(1 << intr_index);
+		}
+		grp_select &= ~(1 << msir_index);
+
+		if (!grp_select) {
+			/*
+			 * We have handled all interrupts in this group;
+			 * resample this group's MSI_INTx register in case
+			 * something else was made pending in the meantime.
+			 */
+			grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static enum cpuhp_state pci_xgene_online;
+
+static int xgene_msi_remove(struct platform_device *pdev)
+{
+	struct xgene_msi *msi = platform_get_drvdata(pdev);
+
+	if (pci_xgene_online)
+		cpuhp_remove_state(pci_xgene_online);
+	cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
+
+	kfree(msi->msi_groups);
+
+	kfree(msi->bitmap);
+	msi->bitmap = NULL;
+
+	xgene_free_domains(msi);
+
+	return 0;
+}
+
+static int xgene_msi_hwirq_alloc(unsigned int cpu)
+{
+	struct xgene_msi *msi = &xgene_msi_ctrl;
+	struct xgene_msi_group *msi_group;
+	cpumask_var_t mask;
+	int i;
+	int err;
+
+	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+		msi_group = &msi->msi_groups[i];
+		if (!msi_group->gic_irq)
+			continue;
+
+		irq_set_chained_handler_and_data(msi_group->gic_irq,
+			xgene_msi_isr, msi_group);
+
+		/*
+		 * Statically allocate MSI GIC IRQs to each CPU core.
+		 * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
+		 * to each core.
+		 */
+		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+			cpumask_clear(mask);
+			cpumask_set_cpu(cpu, mask);
+			err = irq_set_affinity(msi_group->gic_irq, mask);
+			if (err)
+				pr_err("failed to set affinity for GIC IRQ");
+			free_cpumask_var(mask);
+		} else {
+			pr_err("failed to alloc CPU mask for affinity\n");
+			err = -EINVAL;
+		}
+
+		if (err) {
+			irq_set_chained_handler_and_data(msi_group->gic_irq,
+							 NULL, NULL);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int xgene_msi_hwirq_free(unsigned int cpu)
+{
+	struct xgene_msi *msi = &xgene_msi_ctrl;
+	struct xgene_msi_group *msi_group;
+	int i;
+
+	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
+		msi_group = &msi->msi_groups[i];
+		if (!msi_group->gic_irq)
+			continue;
+
+		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+						 NULL);
+	}
+	return 0;
+}
+
+static const struct of_device_id xgene_msi_match_table[] = {
+	{.compatible = "apm,xgene1-msi"},
+	{},
+};
+
+static int xgene_msi_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc, irq_index;
+	struct xgene_msi *xgene_msi;
+	int virt_msir;
+	u32 msi_val, msi_idx;
+
+	xgene_msi = &xgene_msi_ctrl;
+
+	platform_set_drvdata(pdev, xgene_msi);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xgene_msi->msi_regs)) {
+		dev_err(&pdev->dev, "no reg space\n");
+		rc = PTR_ERR(xgene_msi->msi_regs);
+		goto error;
+	}
+	xgene_msi->msi_addr = res->start;
+	xgene_msi->node = pdev->dev.of_node;
+	xgene_msi->num_cpus = num_possible_cpus();
+
+	rc = xgene_msi_init_allocator(xgene_msi);
+	if (rc) {
+		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
+		goto error;
+	}
+
+	rc = xgene_allocate_domains(xgene_msi);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
+		goto error;
+	}
+
+	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+		virt_msir = platform_get_irq(pdev, irq_index);
+		if (virt_msir < 0) {
+			dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
+				irq_index);
+			rc = virt_msir;
+			goto error;
+		}
+		xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
+		xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
+		xgene_msi->msi_groups[irq_index].msi = xgene_msi;
+	}
+
+	/*
+	 * MSInIRx registers are read-to-clear; before registering
+	 * interrupt handlers, read all of them to clear spurious
+	 * interrupts that may occur before the driver is probed.
+	 */
+	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
+		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+			msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
+						    msi_idx);
+		/* Read MSIINTn to confirm */
+		msi_val = xgene_msi_int_read(xgene_msi, irq_index);
+		if (msi_val) {
+			dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+			rc = -EINVAL;
+			goto error;
+		}
+	}
+
+	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
+			       xgene_msi_hwirq_alloc, NULL);
+	if (rc < 0)
+		goto err_cpuhp;
+	pci_xgene_online = rc;
+	rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
+			       xgene_msi_hwirq_free);
+	if (rc)
+		goto err_cpuhp;
+
+	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
+
+	return 0;
+
+err_cpuhp:
+	dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
+error:
+	xgene_msi_remove(pdev);
+	return rc;
+}
+
+static struct platform_driver xgene_msi_driver = {
+	.driver = {
+		.name = "xgene-msi",
+		.of_match_table = xgene_msi_match_table,
+	},
+	.probe = xgene_msi_probe,
+	.remove = xgene_msi_remove,
+};
+
+static int __init xgene_pcie_msi_init(void)
+{
+	return platform_driver_register(&xgene_msi_driver);
+}
+subsys_initcall(xgene_pcie_msi_init);
diff --git a/marvell/linux/drivers/pci/controller/pci-xgene.c b/marvell/linux/drivers/pci/controller/pci-xgene.c
new file mode 100644
index 0000000..ffda3e8
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pci-xgene.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * APM X-Gene PCIe Driver
+ *
+ * Copyright (c) 2014 Applied Micro Circuits Corporation.
+ *
+ * Author: Tanmay Inamdar <tinamdar@apm.com>.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+#define PCIECORE_CTLANDSTATUS		0x50
+#define PIM1_1L				0x80
+#define IBAR2				0x98
+#define IR2MSK				0x9c
+#define PIM2_1L				0xa0
+#define IBAR3L				0xb4
+#define IR3MSKL				0xbc
+#define PIM3_1L				0xc4
+#define OMR1BARL			0x100
+#define OMR2BARL			0x118
+#define OMR3BARL			0x130
+#define CFGBARL				0x154
+#define CFGBARH				0x158
+#define CFGCTL				0x15c
+#define RTDID				0x160
+#define BRIDGE_CFG_0			0x2000
+#define BRIDGE_CFG_4			0x2010
+#define BRIDGE_STATUS_0			0x2600
+
+#define LINK_UP_MASK			0x00000100
+#define AXI_EP_CFG_ACCESS		0x10000
+#define EN_COHERENCY			0xF0000000
+#define EN_REG				0x00000001
+#define OB_LO_IO			0x00000002
+#define XGENE_PCIE_VENDORID		0x10E8
+#define XGENE_PCIE_DEVICEID		0xE004
+#define SZ_1T				(SZ_1G*1024ULL)
+#define PIPE_PHY_RATE_RD(src)		((0xc000 & (u32)(src)) >> 0xe)
+
+#define XGENE_V1_PCI_EXP_CAP		0x40
+
+/* PCIe IP version */
+#define XGENE_PCIE_IP_VER_UNKN		0
+#define XGENE_PCIE_IP_VER_1		1
+#define XGENE_PCIE_IP_VER_2		2
+
+#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+struct xgene_pcie_port {
+	struct device_node	*node;
+	struct device		*dev;
+	struct clk		*clk;
+	void __iomem		*csr_base;
+	void __iomem		*cfg_base;
+	unsigned long		cfg_addr;
+	bool			link_up;
+	u32			version;
+};
+
+static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
+{
+	return readl(port->csr_base + reg);
+}
+
+static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
+{
+	writel(val, port->csr_base + reg);
+}
+
+static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
+{
+	return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+}
+
+static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+{
+	struct pci_config_window *cfg;
+
+	if (acpi_disabled)
+		return (struct xgene_pcie_port *)(bus->sysdata);
+
+	cfg = bus->sysdata;
+	return (struct xgene_pcie_port *)(cfg->priv);
+}
+
+/*
+ * When address bits [17:16] are 2'b01, the configuration access is treated
+ * as Type 1 and forwarded to the external PCIe device.
+ */
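+/*
+ * Illustrative note: AXI_EP_CFG_ACCESS (0x10000) sets address bit 16, i.e.
+ * bits [17:16] = 2'b01, so adding it to cfg_base below selects Type 1
+ * configuration cycles for downstream buses.
+ */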
+static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
+{
+	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+
+	if (bus->number >= (bus->primary + 1))
+		return port->cfg_base + AXI_EP_CFG_ACCESS;
+
+	return port->cfg_base;
+}
+
+/*
+ * For a configuration request, the RTDID register supplies the Bus, Device
+ * and Function numbers of the header fields.
+ */
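+/*
+ * Worked example (illustrative): a request for bus 2, device 3, function 1
+ * programs RTDID with (2 << 8) | (3 << 3) | 1 = 0x219.
+ */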
+static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
+{
+	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+	unsigned int b, d, f;
+	u32 rtdid_val = 0;
+
+	b = bus->number;
+	d = PCI_SLOT(devfn);
+	f = PCI_FUNC(devfn);
+
+	if (!pci_is_root_bus(bus))
+		rtdid_val = (b << 8) | (d << 3) | f;
+
+	xgene_pcie_writel(port, RTDID, rtdid_val);
+	/* read the register back to ensure flush */
+	xgene_pcie_readl(port, RTDID);
+}
+
+/*
+ * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space for
+ * the translation from PCI bus addresses to native bus addresses.  The
+ * entire DDR region is mapped into PCIe space using these registers, so it
+ * can be reached by DMA from EP devices.  The bridge's BAR0/1 should be
+ * hidden during enumeration to avoid sizing and resource allocation by the
+ * PCI core.
+ */
+static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
+{
+	if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
+				     (offset == PCI_BASE_ADDRESS_1)))
+		return true;
+
+	return false;
+}
+
+static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+					int offset)
+{
+	if ((pci_is_root_bus(bus) && devfn != 0) ||
+	    xgene_pcie_hide_rc_bars(bus, offset))
+		return NULL;
+
+	xgene_pcie_set_rtdid_reg(bus, devfn);
+	return xgene_pcie_get_cfg_base(bus) + offset;
+}
+
+static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 *val)
+{
+	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+
+	if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
+	    PCIBIOS_SUCCESSFUL)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/*
+	 * The v1 controller has a bug in its Configuration Request
+	 * Retry Status (CRS) logic: when CRS is enabled and we read the
+	 * Vendor and Device ID of a non-existent device, the controller
+	 * fabricates return data of 0xFFFF0001 ("device exists but is not
+	 * ready") instead of 0xFFFFFFFF ("device does not exist").  This
+	 * causes the PCI core to retry the read until it times out.
+	 * Avoid this by not claiming to support CRS.
+	 */
+	if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
+	    ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
+		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+
+	if (size <= 2)
+		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+#endif
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+static int xgene_get_csr_resource(struct acpi_device *adev,
+				  struct resource *res)
+{
+	struct device *dev = &adev->dev;
+	struct resource_entry *entry;
+	struct list_head list;
+	unsigned long flags;
+	int ret;
+
+	INIT_LIST_HEAD(&list);
+	flags = IORESOURCE_MEM;
+	ret = acpi_dev_get_resources(adev, &list,
+				     acpi_dev_filter_resource_type_cb,
+				     (void *) flags);
+	if (ret < 0) {
+		dev_err(dev, "failed to parse _CRS method, error code %d\n",
+			ret);
+		return ret;
+	}
+
+	if (ret == 0) {
+		dev_err(dev, "no IO and memory resources present in _CRS\n");
+		return -EINVAL;
+	}
+
+	entry = list_first_entry(&list, struct resource_entry, node);
+	*res = *entry->res;
+	acpi_dev_free_resource_list(&list);
+	return 0;
+}
+
+static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct xgene_pcie_port *port;
+	struct resource csr;
+	int ret;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ret = xgene_get_csr_resource(adev, &csr);
+	if (ret) {
+		dev_err(dev, "can't get CSR resource\n");
+		return ret;
+	}
+	port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
+	if (IS_ERR(port->csr_base))
+		return PTR_ERR(port->csr_base);
+
+	port->cfg_base = cfg->win;
+	port->version = ipversion;
+
+	cfg->priv = port;
+	return 0;
+}
+
+static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
+{
+	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
+}
+
+struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+	.bus_shift	= 16,
+	.init		= xgene_v1_pcie_ecam_init,
+	.pci_ops	= {
+		.map_bus	= xgene_pcie_map_bus,
+		.read		= xgene_pcie_config_read32,
+		.write		= pci_generic_config_write,
+	}
+};
+
+static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
+{
+	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
+}
+
+struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+	.bus_shift	= 16,
+	.init		= xgene_v2_pcie_ecam_init,
+	.pci_ops	= {
+		.map_bus	= xgene_pcie_map_bus,
+		.read		= xgene_pcie_config_read32,
+		.write		= pci_generic_config_write,
+	}
+};
+#endif
+
+#if defined(CONFIG_PCI_XGENE)
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
+				  u32 flags, u64 size)
+{
+	u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+	u32 val32 = 0;
+	u32 val;
+
+	val32 = xgene_pcie_readl(port, addr);
+	val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
+	xgene_pcie_writel(port, addr, val);
+
+	val32 = xgene_pcie_readl(port, addr + 0x04);
+	val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
+	xgene_pcie_writel(port, addr + 0x04, val);
+
+	val32 = xgene_pcie_readl(port, addr + 0x04);
+	val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
+	xgene_pcie_writel(port, addr + 0x04, val);
+
+	val32 = xgene_pcie_readl(port, addr + 0x08);
+	val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
+	xgene_pcie_writel(port, addr + 0x08, val);
+
+	return mask;
+}
+
+static void xgene_pcie_linkup(struct xgene_pcie_port *port,
+			      u32 *lanes, u32 *speed)
+{
+	u32 val32;
+
+	port->link_up = false;
+	val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
+	if (val32 & LINK_UP_MASK) {
+		port->link_up = true;
+		*speed = PIPE_PHY_RATE_RD(val32);
+		val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
+		*lanes = val32 >> 26;
+	}
+}
+
+static int xgene_pcie_init_port(struct xgene_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	int rc;
+
+	port->clk = clk_get(dev, NULL);
+	if (IS_ERR(port->clk)) {
+		dev_err(dev, "clock not available\n");
+		return -ENODEV;
+	}
+
+	rc = clk_prepare_enable(port->clk);
+	if (rc) {
+		dev_err(dev, "clock enable failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+			      struct platform_device *pdev)
+{
+	struct device *dev = port->dev;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+	port->csr_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(port->csr_base))
+		return PTR_ERR(port->csr_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+	port->cfg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(port->cfg_base))
+		return PTR_ERR(port->cfg_base);
+	port->cfg_addr = res->start;
+
+	return 0;
+}
+
+static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
+				    struct resource *res, u32 offset,
+				    u64 cpu_addr, u64 pci_addr)
+{
+	struct device *dev = port->dev;
+	resource_size_t size = resource_size(res);
+	u64 restype = resource_type(res);
+	u64 mask = 0;
+	u32 min_size;
+	u32 flag = EN_REG;
+
+	if (restype == IORESOURCE_MEM) {
+		min_size = SZ_128M;
+	} else {
+		min_size = 128;
+		flag |= OB_LO_IO;
+	}
+
+	if (size >= min_size)
+		mask = ~(size - 1) | flag;
+	else
+		dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
+			 (u64)size, min_size);
+
+	xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
+	xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
+	xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
+	xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
+	xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
+	xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
+}
+
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
+{
+	u64 addr = port->cfg_addr;
+
+	xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
+	xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
+	xgene_pcie_writel(port, CFGCTL, EN_REG);
+}
+
+static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
+				 struct list_head *res,
+				 resource_size_t io_base)
+{
+	struct resource_entry *window;
+	struct device *dev = port->dev;
+	int ret;
+
+	resource_list_for_each_entry(window, res) {
+		struct resource *res = window->res;
+		u64 restype = resource_type(res);
+
+		dev_dbg(dev, "%pR\n", res);
+
+		switch (restype) {
+		case IORESOURCE_IO:
+			xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
+						res->start - window->offset);
+			ret = devm_pci_remap_iospace(dev, res, io_base);
+			if (ret < 0)
+				return ret;
+			break;
+		case IORESOURCE_MEM:
+			if (res->flags & IORESOURCE_PREFETCH)
+				xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
+							res->start,
+							res->start -
+							window->offset);
+			else
+				xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
+							res->start,
+							res->start -
+							window->offset);
+			break;
+		case IORESOURCE_BUS:
+			break;
+		default:
+			dev_err(dev, "invalid resource %pR\n", res);
+			return -EINVAL;
+		}
+	}
+	xgene_pcie_setup_cfg_reg(port);
+	return 0;
+}
+
+static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
+				  u64 pim, u64 size)
+{
+	xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
+	xgene_pcie_writel(port, pim_reg + 0x04,
+			  upper_32_bits(pim) | EN_COHERENCY);
+	xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
+	xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
+}
+
+/*
+ * The X-Gene PCIe controller supports a maximum of 3 inbound memory regions.
+ * This function selects a region based on the size of the range.
+ */
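+/*
+ * Worked example (illustrative): for three successive 8 MiB dma-ranges, the
+ * first claims region 1 (4 < size < 16M), the second falls through to
+ * region 0 (1K < size < 1T) and the third to region 2 (1M < size < 1T);
+ * a fourth range would fail with -EINVAL.
+ */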
+static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
+{
+	if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
+		*ib_reg_mask |= (1 << 1);
+		return 1;
+	}
+
+	if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
+		*ib_reg_mask |= (1 << 0);
+		return 0;
+	}
+
+	if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
+		*ib_reg_mask |= (1 << 2);
+		return 2;
+	}
+
+	return -EINVAL;
+}
+
+static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
+				    struct of_pci_range *range, u8 *ib_reg_mask)
+{
+	void __iomem *cfg_base = port->cfg_base;
+	struct device *dev = port->dev;
+	void *bar_addr;
+	u32 pim_reg;
+	u64 cpu_addr = range->cpu_addr;
+	u64 pci_addr = range->pci_addr;
+	u64 size = range->size;
+	u64 mask = ~(size - 1) | EN_REG;
+	u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
+	u32 bar_low;
+	int region;
+
+	region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+	if (region < 0) {
+		dev_warn(dev, "invalid pcie dma-range config\n");
+		return;
+	}
+
+	if (range->flags & IORESOURCE_PREFETCH)
+		flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+	bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
+	switch (region) {
+	case 0:
+		xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
+		bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
+		writel(bar_low, bar_addr);
+		writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
+		pim_reg = PIM1_1L;
+		break;
+	case 1:
+		xgene_pcie_writel(port, IBAR2, bar_low);
+		xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
+		pim_reg = PIM2_1L;
+		break;
+	case 2:
+		xgene_pcie_writel(port, IBAR3L, bar_low);
+		xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
+		xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
+		xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
+		pim_reg = PIM3_1L;
+		break;
+	}
+
+	xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
+}
+
+static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
+{
+	struct device_node *np = port->node;
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	struct device *dev = port->dev;
+	u8 ib_reg_mask = 0;
+
+	if (of_pci_dma_range_parser_init(&parser, np)) {
+		dev_err(dev, "missing dma-ranges property\n");
+		return -EINVAL;
+	}
+
+	/* Get the dma-ranges from DT */
+	for_each_of_pci_range(&parser, &range) {
+		u64 end = range.cpu_addr + range.size - 1;
+
+		dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+			range.flags, range.cpu_addr, end, range.pci_addr);
+		xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
+	}
+	return 0;
+}
+
+/* clear BAR configuration which was done by firmware */
+static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
+{
+	int i;
+
+	for (i = PIM1_1L; i <= CFGCTL; i += 4)
+		xgene_pcie_writel(port, i, 0);
+}
+
+static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
+			    resource_size_t io_base)
+{
+	struct device *dev = port->dev;
+	u32 val, lanes = 0, speed = 0;
+	int ret;
+
+	xgene_pcie_clear_config(port);
+
+	/* setup the vendor and device IDs correctly */
+	val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
+	xgene_pcie_writel(port, BRIDGE_CFG_0, val);
+
+	ret = xgene_pcie_map_ranges(port, res, io_base);
+	if (ret)
+		return ret;
+
+	ret = xgene_pcie_parse_map_dma_ranges(port);
+	if (ret)
+		return ret;
+
+	xgene_pcie_linkup(port, &lanes, &speed);
+	if (!port->link_up)
+		dev_info(dev, "(rc) link down\n");
+	else
+		dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
+	return 0;
+}
+
+static struct pci_ops xgene_pcie_ops = {
+	.map_bus = xgene_pcie_map_bus,
+	.read = xgene_pcie_config_read32,
+	.write = pci_generic_config_write32,
+};
+
+static int xgene_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *dn = dev->of_node;
+	struct xgene_pcie_port *port;
+	resource_size_t iobase = 0;
+	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
+	int ret;
+	LIST_HEAD(res);
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+	if (!bridge)
+		return -ENOMEM;
+
+	port = pci_host_bridge_priv(bridge);
+
+	port->node = of_node_get(dn);
+	port->dev = dev;
+
+	port->version = XGENE_PCIE_IP_VER_UNKN;
+	if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
+		port->version = XGENE_PCIE_IP_VER_1;
+
+	ret = xgene_pcie_map_reg(port, pdev);
+	if (ret)
+		return ret;
+
+	ret = xgene_pcie_init_port(port);
+	if (ret)
+		return ret;
+
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &iobase);
+	if (ret)
+		return ret;
+
+	ret = devm_request_pci_bus_resources(dev, &res);
+	if (ret)
+		goto error;
+
+	ret = xgene_pcie_setup(port, &res, iobase);
+	if (ret)
+		goto error;
+
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = port;
+	bridge->busnr = 0;
+	bridge->ops = &xgene_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
+		goto error;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bus);
+	return 0;
+
+error:
+	pci_free_resource_list(&res);
+	return ret;
+}
+
+static const struct of_device_id xgene_pcie_match_table[] = {
+	{.compatible = "apm,xgene-pcie",},
+	{},
+};
+
+static struct platform_driver xgene_pcie_driver = {
+	.driver = {
+		.name = "xgene-pcie",
+		.of_match_table = of_match_ptr(xgene_pcie_match_table),
+		.suppress_bind_attrs = true,
+	},
+	.probe = xgene_pcie_probe,
+};
+builtin_platform_driver(xgene_pcie_driver);
+#endif
diff --git a/marvell/linux/drivers/pci/controller/pcie-altera-msi.c b/marvell/linux/drivers/pci/controller/pcie-altera-msi.c
new file mode 100644
index 0000000..16d9389
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-altera-msi.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Altera PCIe MSI support
+ *
+ * Author: Ley Foon Tan <lftan@altera.com>
+ *
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MSI_STATUS		0x0
+#define MSI_ERROR		0x4
+#define MSI_INTMASK		0x8
+
+#define MAX_MSI_VECTORS		32
+
+struct altera_msi {
+	DECLARE_BITMAP(used, MAX_MSI_VECTORS);
+	struct mutex		lock;	/* protect "used" bitmap */
+	struct platform_device	*pdev;
+	struct irq_domain	*msi_domain;
+	struct irq_domain	*inner_domain;
+	void __iomem		*csr_base;
+	void __iomem		*vector_base;
+	phys_addr_t		vector_phy;
+	u32			num_of_vectors;
+	int			irq;
+};
+
+static inline void msi_writel(struct altera_msi *msi, const u32 value,
+			      const u32 reg)
+{
+	writel_relaxed(value, msi->csr_base + reg);
+}
+
+static inline u32 msi_readl(struct altera_msi *msi, const u32 reg)
+{
+	return readl_relaxed(msi->csr_base + reg);
+}
+
+static void altera_msi_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct altera_msi *msi;
+	unsigned long status;
+	u32 bit;
+	u32 virq;
+
+	chained_irq_enter(chip, desc);
+	msi = irq_desc_get_handler_data(desc);
+
+	while ((status = msi_readl(msi, MSI_STATUS)) != 0) {
+		for_each_set_bit(bit, &status, msi->num_of_vectors) {
+			/* Dummy read from vector to clear the interrupt */
+			readl_relaxed(msi->vector_base + (bit * sizeof(u32)));
+
+			virq = irq_find_mapping(msi->inner_domain, bit);
+			if (virq)
+				generic_handle_irq(virq);
+			else
+				dev_err(&msi->pdev->dev, "unexpected MSI\n");
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip altera_msi_irq_chip = {
+	.name = "Altera PCIe MSI",
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info altera_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		     MSI_FLAG_PCI_MSIX),
+	.chip	= &altera_msi_irq_chip,
+};
+
+static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct altera_msi *msi = irq_data_get_irq_chip_data(data);
+	phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32));
+
+	msg->address_lo = lower_32_bits(addr);
+	msg->address_hi = upper_32_bits(addr);
+	msg->data = data->hwirq;
+
+	dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int altera_msi_set_affinity(struct irq_data *irq_data,
+				   const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip altera_msi_bottom_irq_chip = {
+	.name			= "Altera MSI",
+	.irq_compose_msi_msg	= altera_compose_msi_msg,
+	.irq_set_affinity	= altera_msi_set_affinity,
+};
+
+static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *args)
+{
+	struct altera_msi *msi = domain->host_data;
+	unsigned long bit;
+	u32 mask;
+
+	WARN_ON(nr_irqs != 1);
+	mutex_lock(&msi->lock);
+
+	bit = find_first_zero_bit(msi->used, msi->num_of_vectors);
+	if (bit >= msi->num_of_vectors) {
+		mutex_unlock(&msi->lock);
+		return -ENOSPC;
+	}
+
+	set_bit(bit, msi->used);
+
+	mutex_unlock(&msi->lock);
+
+	irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip,
+			    domain->host_data, handle_simple_irq,
+			    NULL, NULL);
+
+	mask = msi_readl(msi, MSI_INTMASK);
+	mask |= 1 << bit;
+	msi_writel(msi, mask, MSI_INTMASK);
+
+	return 0;
+}
+
+static void altera_irq_domain_free(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct altera_msi *msi = irq_data_get_irq_chip_data(d);
+	u32 mask;
+
+	mutex_lock(&msi->lock);
+
+	if (!test_bit(d->hwirq, msi->used)) {
+		dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n",
+			d->hwirq);
+	} else {
+		__clear_bit(d->hwirq, msi->used);
+		mask = msi_readl(msi, MSI_INTMASK);
+		mask &= ~(1 << d->hwirq);
+		msi_writel(msi, mask, MSI_INTMASK);
+	}
+
+	mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc	= altera_irq_domain_alloc,
+	.free	= altera_irq_domain_free,
+};
+
+static int altera_allocate_domains(struct altera_msi *msi)
+{
+	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+
+	msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+					     &msi_domain_ops, msi);
+	if (!msi->inner_domain) {
+		dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+				&altera_msi_domain_info, msi->inner_domain);
+	if (!msi->msi_domain) {
+		dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
+		irq_domain_remove(msi->inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void altera_free_domains(struct altera_msi *msi)
+{
+	irq_domain_remove(msi->msi_domain);
+	irq_domain_remove(msi->inner_domain);
+}
+
+static int altera_msi_remove(struct platform_device *pdev)
+{
+	struct altera_msi *msi = platform_get_drvdata(pdev);
+
+	msi_writel(msi, 0, MSI_INTMASK);
+	irq_set_chained_handler(msi->irq, NULL);
+	irq_set_handler_data(msi->irq, NULL);
+
+	altera_free_domains(msi);
+
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static int altera_msi_probe(struct platform_device *pdev)
+{
+	struct altera_msi *msi;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	int ret;
+
+	msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi),
+			   GFP_KERNEL);
+	if (!msi)
+		return -ENOMEM;
+
+	mutex_init(&msi->lock);
+	msi->pdev = pdev;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+	msi->csr_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(msi->csr_base)) {
+		dev_err(&pdev->dev, "failed to map csr memory\n");
+		return PTR_ERR(msi->csr_base);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "vector_slave");
+	msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(msi->vector_base)) {
+		dev_err(&pdev->dev, "failed to map vector_slave memory\n");
+		return PTR_ERR(msi->vector_base);
+	}
+
+	msi->vector_phy = res->start;
+
+	if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) {
+		dev_err(&pdev->dev, "failed to parse the number of vectors\n");
+		return -EINVAL;
+	}
+
+	ret = altera_allocate_domains(msi);
+	if (ret)
+		return ret;
+
+	msi->irq = platform_get_irq(pdev, 0);
+	if (msi->irq < 0) {
+		dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq);
+		ret = msi->irq;
+		goto err;
+	}
+
+	irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi);
+	platform_set_drvdata(pdev, msi);
+
+	return 0;
+
+err:
+	altera_msi_remove(pdev);
+	return ret;
+}
+
+static const struct of_device_id altera_msi_of_match[] = {
+	{ .compatible = "altr,msi-1.0", NULL },
+	{ },
+};
+
+static struct platform_driver altera_msi_driver = {
+	.driver = {
+		.name = "altera-msi",
+		.of_match_table = altera_msi_of_match,
+	},
+	.probe = altera_msi_probe,
+	.remove = altera_msi_remove,
+};
+
+static int __init altera_msi_init(void)
+{
+	return platform_driver_register(&altera_msi_driver);
+}
+
+static void __exit altera_msi_exit(void)
+{
+	platform_driver_unregister(&altera_msi_driver);
+}
+
+subsys_initcall(altera_msi_init);
+MODULE_DEVICE_TABLE(of, altera_msi_of_match);
+module_exit(altera_msi_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-altera.c b/marvell/linux/drivers/pci/controller/pcie-altera.c
new file mode 100644
index 0000000..d2497ca
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-altera.c
@@ -0,0 +1,904 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright Altera Corporation (C) 2013-2015. All rights reserved
+ *
+ * Author: Ley Foon Tan <lftan@altera.com>
+ * Description: Altera PCIe host controller driver
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+#define RP_TX_REG0			0x2000
+#define RP_TX_REG1			0x2004
+#define RP_TX_CNTRL			0x2008
+#define RP_TX_EOP			0x2
+#define RP_TX_SOP			0x1
+#define RP_RXCPL_STATUS			0x2010
+#define RP_RXCPL_EOP			0x2
+#define RP_RXCPL_SOP			0x1
+#define RP_RXCPL_REG0			0x2014
+#define RP_RXCPL_REG1			0x2018
+#define P2A_INT_STATUS			0x3060
+#define P2A_INT_STS_ALL			0xf
+#define P2A_INT_ENABLE			0x3070
+#define P2A_INT_ENA_ALL			0xf
+#define RP_LTSSM			0x3c64
+#define RP_LTSSM_MASK			0x1f
+#define LTSSM_L0			0xf
+
+#define S10_RP_TX_CNTRL			0x2004
+#define S10_RP_RXCPL_REG		0x2008
+#define S10_RP_RXCPL_STATUS		0x200C
+#define S10_RP_CFG_ADDR(pcie, reg)	\
+	(((pcie)->hip_base) + (reg) + (1 << 20))
+#define S10_RP_SECONDARY(pcie)		\
+	readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+/* TLP configuration type 0 and 1 */
+#define TLP_FMTTYPE_CFGRD0		0x04	/* Configuration Read Type 0 */
+#define TLP_FMTTYPE_CFGWR0		0x44	/* Configuration Write Type 0 */
+#define TLP_FMTTYPE_CFGRD1		0x05	/* Configuration Read Type 1 */
+#define TLP_FMTTYPE_CFGWR1		0x45	/* Configuration Write Type 1 */
+#define TLP_PAYLOAD_SIZE		0x01
+#define TLP_READ_TAG			0x1d
+#define TLP_WRITE_TAG			0x10
+#define RP_DEVFN			0
+#define TLP_REQ_ID(bus, devfn)		(((bus) << 8) | (devfn))
+#define TLP_CFG_DW0(pcie, cfg)		\
+		(((cfg) << 24) |	\
+		  TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(pcie, tag, be)	\
+	(((TLP_REQ_ID(pcie->root_bus_nr,  RP_DEVFN)) << 16) | (tag << 8) | (be))
+#define TLP_CFG_DW2(bus, devfn, offset)	\
+				(((bus) << 24) | ((devfn) << 16) | (offset))
+#define TLP_COMP_STATUS(s)		(((s) >> 13) & 7)
+#define TLP_BYTE_COUNT(s)		(((s) >> 0) & 0xfff)
+#define TLP_HDR_SIZE			3
+#define TLP_LOOP			500
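+
+/*
+ * Worked example (illustrative, assuming the V1 IP, root_bus_nr = 0 and a
+ * full-dword read with byte_en = 0xf): a Type 1 config read of bus 1,
+ * devfn 0, offset 0x10 is built as
+ * DW0 = (TLP_FMTTYPE_CFGRD1 << 24) | TLP_PAYLOAD_SIZE = 0x05000001,
+ * DW1 = (TLP_REQ_ID(0, 0) << 16) | (TLP_READ_TAG << 8) | 0xf = 0x00001d0f,
+ * DW2 = (1 << 24) | (0 << 16) | 0x10 = 0x01000010.
+ */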
+
+#define LINK_UP_TIMEOUT			HZ
+#define LINK_RETRAIN_TIMEOUT		HZ
+
+#define DWORD_MASK			3
+
+#define S10_TLP_FMTTYPE_CFGRD0		0x05
+#define S10_TLP_FMTTYPE_CFGRD1		0x04
+#define S10_TLP_FMTTYPE_CFGWR0		0x45
+#define S10_TLP_FMTTYPE_CFGWR1		0x44
+
+enum altera_pcie_version {
+	ALTERA_PCIE_V1 = 0,
+	ALTERA_PCIE_V2,
+};
+
+struct altera_pcie {
+	struct platform_device	*pdev;
+	void __iomem		*cra_base;
+	void __iomem		*hip_base;
+	int			irq;
+	u8			root_bus_nr;
+	struct irq_domain	*irq_domain;
+	struct resource		bus_range;
+	struct list_head	resources;
+	const struct altera_pcie_data	*pcie_data;
+};
+
+struct altera_pcie_ops {
+	int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value);
+	void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers,
+			      u32 data, bool align);
+	bool (*get_link_status)(struct altera_pcie *pcie);
+	int (*rp_read_cfg)(struct altera_pcie *pcie, int where,
+			   int size, u32 *value);
+	int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
+			    int where, int size, u32 value);
+};
+
+struct altera_pcie_data {
+	const struct altera_pcie_ops *ops;
+	enum altera_pcie_version version;
+	u32 cap_offset;		/* PCIe capability structure register offset */
+	u32 cfgrd0;
+	u32 cfgrd1;
+	u32 cfgwr0;
+	u32 cfgwr1;
+};
+
+struct tlp_rp_regpair_t {
+	u32 ctrl;
+	u32 reg0;
+	u32 reg1;
+};
+
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+			      const u32 reg)
+{
+	writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+	return readl_relaxed(pcie->cra_base + reg);
+}
+
+static bool altera_pcie_link_up(struct altera_pcie *pcie)
+{
+	return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
+}
+
+static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+	void __iomem *addr = S10_RP_CFG_ADDR(pcie,
+				   pcie->pcie_data->cap_offset +
+				   PCI_EXP_LNKSTA);
+
+	return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
+/*
+ * The Altera PCIe port uses BAR0 of the RC's configuration space for the
+ * translation from PCI bus addresses to native bus addresses.  The entire
+ * DDR region is mapped into PCIe space using this register, so it can be
+ * reached by DMA from EP devices.  BAR0 is also used to access the MSI
+ * vectors when an MSI/MSI-X interrupt is received from EP devices,
+ * eventually triggering an interrupt to the GIC.  The bridge's BAR0 should
+ * be hidden during enumeration to avoid sizing and resource allocation by
+ * the PCI core.
+ */
+static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int  devfn,
+				    int offset)
+{
+	if (pci_is_root_bus(bus) && (devfn == 0) &&
+	    (offset == PCI_BASE_ADDRESS_0))
+		return true;
+
+	return false;
+}
+
+static void tlp_write_tx(struct altera_pcie *pcie,
+			 struct tlp_rp_regpair_t *tlp_rp_regdata)
+{
+	cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
+	cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
+	cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
+}
+
+static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl)
+{
+	cra_writel(pcie, reg0, RP_TX_REG0);
+	cra_writel(pcie, ctrl, S10_RP_TX_CNTRL);
+}
+
+static bool altera_pcie_valid_device(struct altera_pcie *pcie,
+				     struct pci_bus *bus, int dev)
+{
+	/* If there is no link, then there is no device */
+	if (bus->number != pcie->root_bus_nr) {
+		if (!pcie->pcie_data->ops->get_link_status(pcie))
+			return false;
+	}
+
+	/* access only one slot on each root port */
+	if (bus->number == pcie->root_bus_nr && dev > 0)
+		return false;
+
+	return true;
+}
+
+static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+	int i;
+	bool sop = false;
+	u32 ctrl;
+	u32 reg0, reg1;
+	u32 comp_status = 1;
+
+	/*
+	 * Minimum 2 loops to read TLP headers and 1 loop to read data
+	 * payload.
+	 */
+	for (i = 0; i < TLP_LOOP; i++) {
+		ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
+		if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
+			reg0 = cra_readl(pcie, RP_RXCPL_REG0);
+			reg1 = cra_readl(pcie, RP_RXCPL_REG1);
+
+			if (ctrl & RP_RXCPL_SOP) {
+				sop = true;
+				comp_status = TLP_COMP_STATUS(reg1);
+			}
+
+			if (ctrl & RP_RXCPL_EOP) {
+				if (comp_status)
+					return PCIBIOS_DEVICE_NOT_FOUND;
+
+				if (value)
+					*value = reg0;
+
+				return PCIBIOS_SUCCESSFUL;
+			}
+		}
+		udelay(5);
+	}
+
+	return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value)
+{
+	u32 ctrl;
+	u32 comp_status;
+	u32 dw[4];
+	u32 count;
+	struct device *dev = &pcie->pdev->dev;
+
+	for (count = 0; count < TLP_LOOP; count++) {
+		ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
+		if (ctrl & RP_RXCPL_SOP) {
+			/* Read first DW */
+			dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG);
+			break;
+		}
+
+		udelay(5);
+	}
+
+	/* SOP detection failed, return error */
+	if (count == TLP_LOOP)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	count = 1;
+
+	/* Poll for EOP */
+	while (count < ARRAY_SIZE(dw)) {
+		ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
+		dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG);
+		if (ctrl & RP_RXCPL_EOP) {
+			comp_status = TLP_COMP_STATUS(dw[1]);
+			if (comp_status)
+				return PCIBIOS_DEVICE_NOT_FOUND;
+
+			if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) &&
+			    count == 4)
+				*value = dw[3];
+
+			return PCIBIOS_SUCCESSFUL;
+		}
+	}
+
+	dev_warn(dev, "Malformed TLP packet\n");
+
+	return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+			     u32 data, bool align)
+{
+	struct tlp_rp_regpair_t tlp_rp_regdata;
+
+	tlp_rp_regdata.reg0 = headers[0];
+	tlp_rp_regdata.reg1 = headers[1];
+	tlp_rp_regdata.ctrl = RP_TX_SOP;
+	tlp_write_tx(pcie, &tlp_rp_regdata);
+
+	if (align) {
+		tlp_rp_regdata.reg0 = headers[2];
+		tlp_rp_regdata.reg1 = 0;
+		tlp_rp_regdata.ctrl = 0;
+		tlp_write_tx(pcie, &tlp_rp_regdata);
+
+		tlp_rp_regdata.reg0 = data;
+		tlp_rp_regdata.reg1 = 0;
+	} else {
+		tlp_rp_regdata.reg0 = headers[2];
+		tlp_rp_regdata.reg1 = data;
+	}
+
+	tlp_rp_regdata.ctrl = RP_TX_EOP;
+	tlp_write_tx(pcie, &tlp_rp_regdata);
+}
+
+static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
+				 u32 data, bool dummy)
+{
+	s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP);
+	s10_tlp_write_tx(pcie, headers[1], 0);
+	s10_tlp_write_tx(pcie, headers[2], 0);
+	s10_tlp_write_tx(pcie, data, RP_TX_EOP);
+}
+
+static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn,
+			   int where, u8 byte_en, bool read, u32 *headers)
+{
+	u8 cfg;
+	u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0;
+	u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1;
+	u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG;
+
+	if (pcie->pcie_data->version == ALTERA_PCIE_V1)
+		cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1;
+	else
+		cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1;
+
+	headers[0] = TLP_CFG_DW0(pcie, cfg);
+	headers[1] = TLP_CFG_DW1(pcie, tag, byte_en);
+	headers[2] = TLP_CFG_DW2(bus, devfn, where);
+}
+
+static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
+			      int where, u8 byte_en, u32 *value)
+{
+	u32 headers[TLP_HDR_SIZE];
+
+	get_tlp_header(pcie, bus, devfn, where, byte_en, true,
+		       headers);
+
+	pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);
+
+	return pcie->pcie_data->ops->tlp_read_pkt(pcie, value);
+}
+
+static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
+			       int where, u8 byte_en, u32 value)
+{
+	u32 headers[TLP_HDR_SIZE];
+	int ret;
+
+	get_tlp_header(pcie, bus, devfn, where, byte_en, false,
+		       headers);
+
+	/* check alignment to Qword */
+	if ((where & 0x7) == 0)
+		pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
+						    value, true);
+	else
+		pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
+						    value, false);
+
+	ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
+
+	/*
+	 * Monitor changes to PCI_PRIMARY_BUS register on root port
+	 * and update local copy of root bus number accordingly.
+	 */
+	if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
+		pcie->root_bus_nr = (u8)(value);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int s10_rp_read_cfg(struct altera_pcie *pcie, int where,
+			   int size, u32 *value)
+{
+	void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
+
+	switch (size) {
+	case 1:
+		*value = readb(addr);
+		break;
+	case 2:
+		*value = readw(addr);
+		break;
+	default:
+		*value = readl(addr);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+			    int where, int size, u32 value)
+{
+	void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);
+
+	switch (size) {
+	case 1:
+		writeb(value, addr);
+		break;
+	case 2:
+		writew(value, addr);
+		break;
+	default:
+		writel(value, addr);
+		break;
+	}
+
+	/*
+	 * Monitor changes to PCI_PRIMARY_BUS register on root port
+	 * and update local copy of root bus number accordingly.
+	 */
+	if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+		pcie->root_bus_nr = value & 0xff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+				 unsigned int devfn, int where, int size,
+				 u32 *value)
+{
+	int ret;
+	u32 data;
+	u8 byte_en;
+
+	if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg)
+		return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
+							 size, value);
+
+	switch (size) {
+	case 1:
+		byte_en = 1 << (where & 3);
+		break;
+	case 2:
+		byte_en = 3 << (where & 3);
+		break;
+	default:
+		byte_en = 0xf;
+		break;
+	}
+
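+	/*
+	 * Illustrative example: a 2-byte read at where = 0x06 selects byte
+	 * lanes 2-3, i.e. byte_en = 3 << (0x06 & 3) = 0xc, while the dword
+	 * read below targets the aligned offset 0x04.
+	 */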
+	ret = tlp_cfg_dword_read(pcie, busno, devfn,
+				 (where & ~DWORD_MASK), byte_en, &data);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
+
+	switch (size) {
+	case 1:
+		*value = (data >> (8 * (where & 0x3))) & 0xff;
+		break;
+	case 2:
+		*value = (data >> (8 * (where & 0x2))) & 0xffff;
+		break;
+	default:
+		*value = data;
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+				  unsigned int devfn, int where, int size,
+				  u32 value)
+{
+	u32 data32;
+	u32 shift = 8 * (where & 3);
+	u8 byte_en;
+
+	if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg)
+		return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
+						     where, size, value);
+
+	switch (size) {
+	case 1:
+		data32 = (value & 0xff) << shift;
+		byte_en = 1 << (where & 3);
+		break;
+	case 2:
+		data32 = (value & 0xffff) << shift;
+		byte_en = 3 << (where & 3);
+		break;
+	default:
+		data32 = value;
+		byte_en = 0xf;
+		break;
+	}
+
+	return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+				   byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *value)
+{
+	struct altera_pcie *pcie = bus->sysdata;
+
+	if (altera_pcie_hide_rc_bar(bus, devfn, where))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
+		*value = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+				     value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 value)
+{
+	struct altera_pcie *pcie = bus->sysdata;
+
+	if (altera_pcie_hide_rc_bar(bus, devfn, where))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+				     value);
+}
+
+static struct pci_ops altera_pcie_ops = {
+	.read = altera_pcie_cfg_read,
+	.write = altera_pcie_cfg_write,
+};
+
+static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
+				unsigned int devfn, int offset, u16 *value)
+{
+	u32 data;
+	int ret;
+
+	ret = _altera_pcie_cfg_read(pcie, busno, devfn,
+				    pcie->pcie_data->cap_offset + offset,
+				    sizeof(*value),
+				    &data);
+	*value = data;
+	return ret;
+}
+
+static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
+				 unsigned int devfn, int offset, u16 value)
+{
+	return _altera_pcie_cfg_write(pcie, busno, devfn,
+				      pcie->pcie_data->cap_offset + offset,
+				      sizeof(value),
+				      value);
+}
+
+static void altera_wait_link_retrain(struct altera_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	u16 reg16;
+	unsigned long start_jiffies;
+
+	/* Wait for link training end. */
+	start_jiffies = jiffies;
+	for (;;) {
+		altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+				     PCI_EXP_LNKSTA, &reg16);
+		if (!(reg16 & PCI_EXP_LNKSTA_LT))
+			break;
+
+		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+			dev_err(dev, "link retrain timeout\n");
+			break;
+		}
+		udelay(100);
+	}
+
+	/* Wait for the link to come up. */
+	start_jiffies = jiffies;
+	for (;;) {
+		if (pcie->pcie_data->ops->get_link_status(pcie))
+			break;
+
+		if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+			dev_err(dev, "link up timeout\n");
+			break;
+		}
+		udelay(100);
+	}
+}
+
+static void altera_pcie_retrain(struct altera_pcie *pcie)
+{
+	u16 linkcap, linkstat, linkctl;
+
+	if (!pcie->pcie_data->ops->get_link_status(pcie))
+		return;
+
+	/*
+	 * Set the Retrain Link bit if the PCIe root port supports speeds
+	 * greater than 2.5 GT/s but the link currently runs at 2.5 GT/s.
+	 */
+	altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
+			     &linkcap);
+	if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+		return;
+
+	altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
+			     &linkstat);
+	if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+		altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+				     PCI_EXP_LNKCTL, &linkctl);
+		linkctl |= PCI_EXP_LNKCTL_RL;
+		altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+				      PCI_EXP_LNKCTL, linkctl);
+
+		altera_wait_link_retrain(pcie);
+	}
+}
+
+static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = altera_pcie_intx_map,
+	.xlate = pci_irqd_intx_xlate,
+};
+
+static void altera_pcie_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct altera_pcie *pcie;
+	struct device *dev;
+	unsigned long status;
+	u32 bit;
+	u32 virq;
+
+	chained_irq_enter(chip, desc);
+	pcie = irq_desc_get_handler_data(desc);
+	dev = &pcie->pdev->dev;
+
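+	/* Keep draining the status register until no INTx bits remain set. */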
+	while ((status = cra_readl(pcie, P2A_INT_STATUS)
+		& P2A_INT_STS_ALL) != 0) {
+		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+			/* clear interrupts */
+			cra_writel(pcie, 1 << bit, P2A_INT_STATUS);
+
+			virq = irq_find_mapping(pcie->irq_domain, bit);
+			if (virq)
+				generic_handle_irq(virq);
+			else
+				dev_err(dev, "unexpected IRQ, INT%d\n", bit);
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
+{
+	int err, res_valid = 0;
+	struct device *dev = &pcie->pdev->dev;
+	struct resource_entry *win;
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &pcie->resources, NULL);
+	if (err)
+		return err;
+
+	err = devm_request_pci_bus_resources(dev, &pcie->resources);
+	if (err)
+		goto out_release_res;
+
+	resource_list_for_each_entry(win, &pcie->resources) {
+		struct resource *res = win->res;
+
+		if (resource_type(res) == IORESOURCE_MEM)
+			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+	}
+
+	if (res_valid)
+		return 0;
+
+	dev_err(dev, "non-prefetchable memory resource required\n");
+	err = -EINVAL;
+
+out_release_res:
+	pci_free_resource_list(&pcie->resources);
+	return err;
+}
+
+static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *node = dev->of_node;
+
+	/* Setup INTx */
+	pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+					&intx_domain_ops, pcie);
+	if (!pcie->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void altera_pcie_irq_teardown(struct altera_pcie *pcie)
+{
+	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+	irq_domain_remove(pcie->irq_domain);
+	irq_dispose_mapping(pcie->irq);
+}
+
+static int altera_pcie_parse_dt(struct altera_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct platform_device *pdev = pcie->pdev;
+	struct resource *cra;
+	struct resource *hip;
+
+	cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
+	pcie->cra_base = devm_ioremap_resource(dev, cra);
+	if (IS_ERR(pcie->cra_base))
+		return PTR_ERR(pcie->cra_base);
+
+	if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
+		hip = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Hip");
+		pcie->hip_base = devm_ioremap_resource(&pdev->dev, hip);
+		if (IS_ERR(pcie->hip_base))
+			return PTR_ERR(pcie->hip_base);
+	}
+
+	/* setup IRQ */
+	pcie->irq = platform_get_irq(pdev, 0);
+	if (pcie->irq < 0) {
+		dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
+		return pcie->irq;
+	}
+
+	irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+	return 0;
+}
+
+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+	altera_pcie_retrain(pcie);
+}
+
+static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
+	.tlp_read_pkt = tlp_read_packet,
+	.tlp_write_pkt = tlp_write_packet,
+	.get_link_status = altera_pcie_link_up,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
+	.tlp_read_pkt = s10_tlp_read_packet,
+	.tlp_write_pkt = s10_tlp_write_packet,
+	.get_link_status = s10_altera_pcie_link_up,
+	.rp_read_cfg = s10_rp_read_cfg,
+	.rp_write_cfg = s10_rp_write_cfg,
+};
+
+static const struct altera_pcie_data altera_pcie_1_0_data = {
+	.ops = &altera_pcie_ops_1_0,
+	.cap_offset = 0x80,
+	.version = ALTERA_PCIE_V1,
+	.cfgrd0 = TLP_FMTTYPE_CFGRD0,
+	.cfgrd1 = TLP_FMTTYPE_CFGRD1,
+	.cfgwr0 = TLP_FMTTYPE_CFGWR0,
+	.cfgwr1 = TLP_FMTTYPE_CFGWR1,
+};
+
+static const struct altera_pcie_data altera_pcie_2_0_data = {
+	.ops = &altera_pcie_ops_2_0,
+	.version = ALTERA_PCIE_V2,
+	.cap_offset = 0x70,
+	.cfgrd0 = S10_TLP_FMTTYPE_CFGRD0,
+	.cfgrd1 = S10_TLP_FMTTYPE_CFGRD1,
+	.cfgwr0 = S10_TLP_FMTTYPE_CFGWR0,
+	.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
+};
+
+static const struct of_device_id altera_pcie_of_match[] = {
+	{.compatible = "altr,pcie-root-port-1.0",
+	 .data = &altera_pcie_1_0_data },
+	{.compatible = "altr,pcie-root-port-2.0",
+	 .data = &altera_pcie_2_0_data },
+	{},
+};
+
+static int altera_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct altera_pcie *pcie;
+	struct pci_bus *bus;
+	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
+	int ret;
+	const struct of_device_id *match;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+	pcie->pdev = pdev;
+	platform_set_drvdata(pdev, pcie);
+
+	match = of_match_device(altera_pcie_of_match, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	pcie->pcie_data = match->data;
+
+	ret = altera_pcie_parse_dt(pcie);
+	if (ret) {
+		dev_err(dev, "Parsing DT failed\n");
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&pcie->resources);
+
+	ret = altera_pcie_parse_request_of_pci_ranges(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to add resources\n");
+		return ret;
+	}
+
+	ret = altera_pcie_init_irq_domain(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to create IRQ domain\n");
+		return ret;
+	}
+
+	/* clear all interrupts */
+	cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+	/* enable all interrupts */
+	cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+	altera_pcie_host_init(pcie);
+
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_bus_nr;
+	bridge->ops = &altera_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
+		return ret;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+
+	/* Configure PCI Express setting. */
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(bus);
+	return ret;
+}
+
+static int altera_pcie_remove(struct platform_device *pdev)
+{
+	struct altera_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+	pci_stop_root_bus(bridge->bus);
+	pci_remove_root_bus(bridge->bus);
+	pci_free_resource_list(&pcie->resources);
+	altera_pcie_irq_teardown(pcie);
+
+	return 0;
+}
+
+static struct platform_driver altera_pcie_driver = {
+	.probe		= altera_pcie_probe,
+	.remove		= altera_pcie_remove,
+	.driver = {
+		.name	= "altera-pcie",
+		.of_match_table = altera_pcie_of_match,
+	},
+};
+
+MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
+module_platform_driver(altera_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-cadence-ep.c b/marvell/linux/drivers/pci/controller/pcie-cadence-ep.c
new file mode 100644
index 0000000..5e23d57
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-cadence-ep.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe endpoint controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "pcie-cadence.h"
+
+#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
+#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
+#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3
+
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ *		  the sending of a memory write (MSI) / normal message (legacy
+ *		  IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ *		  dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ *		the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct cdns_pcie_ep {
+	struct cdns_pcie		pcie;
+	u32				max_regions;
+	unsigned long			ob_region_map;
+	phys_addr_t			*ob_addr;
+	phys_addr_t			irq_phys_addr;
+	void __iomem			*irq_cpu_addr;
+	u64				irq_pci_addr;
+	u8				irq_pci_fn;
+	u8				irq_pending;
+};
+
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+				     struct pci_epf_header *hdr)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+
+	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
+	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
+			       hdr->subclass_code | hdr->baseclass_code << 8);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
+			       hdr->cache_line_size);
+	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);
+
+	/*
+	 * Vendor ID can only be modified from function 0, all other functions
+	 * use the same vendor ID as function 0.
+	 */
+	if (fn == 0) {
+		/* Update the vendor IDs. */
+		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
+			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
+
+		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+	}
+
+	return 0;
+}
+
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+				struct pci_epf_bar *epf_bar)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	dma_addr_t bar_phys = epf_bar->phys_addr;
+	enum pci_barno bar = epf_bar->barno;
+	int flags = epf_bar->flags;
+	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+	u64 sz;
+
+	/* BAR size is 2^(aperture + 7) */
+	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not suited
+	 * for 64-bit values on 32-bit platforms.
+	 */
+	sz = 1ULL << fls64(sz - 1);
+	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
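+	/* e.g. a 1 MiB BAR: ilog2(SZ_1M) - 7 = 20 - 7 = 13. */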
+
+	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
+	} else {
+		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+		bool is_64bits = sz > SZ_2G;
+
+		if (is_64bits && (bar & 1))
+			return -EINVAL;
+
+		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+		if (is_64bits && is_prefetch)
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+		else if (is_prefetch)
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+		else if (is_64bits)
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
+		else
+			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
+	}
+
+	addr0 = lower_32_bits(bar_phys);
+	addr1 = upper_32_bits(bar_phys);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
+			 addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
+			 addr1);
+
+	if (bar < BAR_4) {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	cfg = cdns_pcie_readl(pcie, reg);
+	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+	cdns_pcie_writel(pcie, reg, cfg);
+
+	return 0;
+}
+
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+				   struct pci_epf_bar *epf_bar)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	enum pci_barno bar = epf_bar->barno;
+	u32 reg, cfg, b, ctrl;
+
+	if (bar < BAR_4) {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+	cfg = cdns_pcie_readl(pcie, reg);
+	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+	cdns_pcie_writel(pcie, reg, cfg);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+}
+
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
+				 u64 pci_addr, size_t size)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 r;
+
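+	/* Region 0 is reserved for IRQ writes, so bit 0 is already set. */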
+	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
+	if (r >= ep->max_regions - 1) {
+		dev_err(&epc->dev, "no free outbound region\n");
+		return -EINVAL;
+	}
+
+	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);
+
+	set_bit(r, &ep->ob_region_map);
+	ep->ob_addr[r] = addr;
+
+	return 0;
+}
+
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+				    phys_addr_t addr)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 r;
+
+	for (r = 0; r < ep->max_regions - 1; r++)
+		if (ep->ob_addr[r] == addr)
+			break;
+
+	if (r == ep->max_regions - 1)
+		return;
+
+	cdns_pcie_reset_outbound_region(pcie, r);
+
+	ep->ob_addr[r] = 0;
+	clear_bit(r, &ep->ob_region_map);
+}
+
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+	u16 flags;
+
+	/*
+	 * Set the Multiple Message Capable bitfield of the Message Control
+	 * register.
+	 */
+	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
+	flags |= PCI_MSI_FLAGS_64BIT;
+	flags &= ~PCI_MSI_FLAGS_MASKBIT;
+	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
+
+	return 0;
+}
+
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+	u16 flags, mme;
+
+	/* Validate that the MSI feature is actually enabled. */
+	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+	if (!(flags & PCI_MSI_FLAGS_ENABLE))
+		return -EINVAL;
+
+	/*
+	 * Get the Multiple Message Enable bitfield from the Message Control
+	 * register.
+	 */
+	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+
+	return mme;
+}
+
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
+				     u8 intx, bool is_asserted)
+{
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 offset;
+	u16 status;
+	u8 msg_code;
+
+	intx &= 3;
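+	/* Only INTA..INTD exist, so fold the index into the 0-3 range. */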
+
+	/* Set the outbound region if needed. */
+	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
+		     ep->irq_pci_fn != fn)) {
+		/* First region was reserved for IRQ writes. */
+		cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
+							     ep->irq_phys_addr);
+		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
+		ep->irq_pci_fn = fn;
+	}
+
+	if (is_asserted) {
+		ep->irq_pending |= BIT(intx);
+		msg_code = MSG_CODE_ASSERT_INTA + intx;
+	} else {
+		ep->irq_pending &= ~BIT(intx);
+		msg_code = MSG_CODE_DEASSERT_INTA + intx;
+	}
+
+	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
+	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
+		status ^= PCI_STATUS_INTERRUPT;
+		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
+	}
+
+	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
+		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
+		 CDNS_PCIE_MSG_NO_DATA;
+	writel(0, ep->irq_cpu_addr + offset);
+}
+
+static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+{
+	u16 cmd;
+
+	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
+	if (cmd & PCI_COMMAND_INTX_DISABLE)
+		return -EINVAL;
+
+	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
+	/*
+	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+	 * from drivers/pci/dwc/pci-dra7xx.c
+	 */
+	mdelay(1);
+	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
+	return 0;
+}
+
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+				     u8 interrupt_num)
+{
+	struct cdns_pcie *pcie = &ep->pcie;
+	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+	u16 flags, mme, data, data_mask;
+	u8 msi_count;
+	u64 pci_addr, pci_addr_mask = 0xff;
+
+	/* Check whether the MSI feature has been enabled by the PCI host. */
+	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+	if (!(flags & PCI_MSI_FLAGS_ENABLE))
+		return -EINVAL;
+
+	/* Get the number of enabled MSIs */
+	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+	msi_count = 1 << mme;
+	if (!interrupt_num || interrupt_num > msi_count)
+		return -EINVAL;
+
+	/* Compute the data value to be written. */
+	data_mask = msi_count - 1;
+	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
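+	/* The low log2(msi_count) bits of the MSI data now select the vector. */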
+
+	/* Get the PCI address to write the MSI data to. */
+	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+	pci_addr <<= 32;
+	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+	pci_addr &= GENMASK_ULL(63, 2);
+
+	/* Set the outbound region if needed. */
+	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+		     ep->irq_pci_fn != fn)) {
+		/* First region was reserved for IRQ writes. */
+		cdns_pcie_set_outbound_region(pcie, fn, 0,
+					      false,
+					      ep->irq_phys_addr,
+					      pci_addr & ~pci_addr_mask,
+					      pci_addr_mask + 1);
+		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+		ep->irq_pci_fn = fn;
+	}
+	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+
+	return 0;
+}
+
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+				  enum pci_epc_irq_type type,
+				  u16 interrupt_num)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+
+	case PCI_EPC_IRQ_MSI:
+		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int cdns_pcie_ep_start(struct pci_epc *epc)
+{
+	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+	struct cdns_pcie *pcie = &ep->pcie;
+	struct pci_epf *epf;
+	u32 cfg;
+
+	/*
+	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
+	 * and can't be disabled anyway.
+	 */
+	cfg = BIT(0);
+	list_for_each_entry(epf, &epc->pci_epf, list)
+		cfg |= BIT(epf->func_no);
+	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
+
+	return 0;
+}
+
+static const struct pci_epc_features cdns_pcie_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = false,
+};
+
+static const struct pci_epc_features*
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+	return &cdns_pcie_epc_features;
+}
+
+static const struct pci_epc_ops cdns_pcie_epc_ops = {
+	.write_header	= cdns_pcie_ep_write_header,
+	.set_bar	= cdns_pcie_ep_set_bar,
+	.clear_bar	= cdns_pcie_ep_clear_bar,
+	.map_addr	= cdns_pcie_ep_map_addr,
+	.unmap_addr	= cdns_pcie_ep_unmap_addr,
+	.set_msi	= cdns_pcie_ep_set_msi,
+	.get_msi	= cdns_pcie_ep_get_msi,
+	.raise_irq	= cdns_pcie_ep_raise_irq,
+	.start		= cdns_pcie_ep_start,
+	.get_features	= cdns_pcie_ep_get_features,
+};
+
+static const struct of_device_id cdns_pcie_ep_of_match[] = {
+	{ .compatible = "cdns,cdns-pcie-ep" },
+
+	{ },
+};
+
+static int cdns_pcie_ep_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct cdns_pcie_ep *ep;
+	struct cdns_pcie *pcie;
+	struct pci_epc *epc;
+	struct resource *res;
+	int ret;
+	int phy_count;
+
+	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	pcie = &ep->pcie;
+	pcie->is_rc = false;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
+	pcie->reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->reg_base)) {
+		dev_err(dev, "missing \"reg\"\n");
+		return PTR_ERR(pcie->reg_base);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+	if (!res) {
+		dev_err(dev, "missing \"mem\"\n");
+		return -EINVAL;
+	}
+	pcie->mem_res = res;
+
+	ret = of_property_read_u32(np, "cdns,max-outbound-regions",
+				   &ep->max_regions);
+	if (ret < 0) {
+		dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
+		return ret;
+	}
+	ep->ob_addr = devm_kcalloc(dev,
+				   ep->max_regions, sizeof(*ep->ob_addr),
+				   GFP_KERNEL);
+	if (!ep->ob_addr)
+		return -ENOMEM;
+
+	ret = cdns_pcie_init_phy(dev, pcie);
+	if (ret) {
+		dev_err(dev, "failed to init phy\n");
+		return ret;
+	}
+	platform_set_drvdata(pdev, pcie);
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync() failed\n");
+		goto err_get_sync;
+	}
+
+	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
+	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
+
+	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "failed to create epc device\n");
+		ret = PTR_ERR(epc);
+		goto err_init;
+	}
+
+	epc_set_drvdata(epc, ep);
+
+	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
+		epc->max_functions = 1;
+
+	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
+			       resource_size(pcie->mem_res));
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize the memory space\n");
+		goto err_init;
+	}
+
+	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+						  SZ_128K);
+	if (!ep->irq_cpu_addr) {
+		dev_err(dev, "failed to reserve memory space for MSI\n");
+		ret = -ENOMEM;
+		goto free_epc_mem;
+	}
+	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+	/* Reserve region 0 for IRQs */
+	set_bit(0, &ep->ob_region_map);
+
+	return 0;
+
+ free_epc_mem:
+	pci_epc_mem_exit(epc);
+
+ err_init:
+	pm_runtime_put_sync(dev);
+
+ err_get_sync:
+	pm_runtime_disable(dev);
+	cdns_pcie_disable_phy(pcie);
+	phy_count = pcie->phy_count;
+	while (phy_count--)
+		device_link_del(pcie->link[phy_count]);
+
+	return ret;
+}
+
+static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_put_sync(dev);
+	if (ret < 0)
+		dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+	pm_runtime_disable(dev);
+
+	cdns_pcie_disable_phy(pcie);
+}
+
+static struct platform_driver cdns_pcie_ep_driver = {
+	.driver = {
+		.name = "cdns-pcie-ep",
+		.of_match_table = cdns_pcie_ep_of_match,
+		.pm	= &cdns_pcie_pm_ops,
+	},
+	.probe = cdns_pcie_ep_probe,
+	.shutdown = cdns_pcie_ep_shutdown,
+};
+builtin_platform_driver(cdns_pcie_ep_driver);
diff --git a/marvell/linux/drivers/pci/controller/pcie-cadence-host.c b/marvell/linux/drivers/pci/controller/pcie-cadence-host.c
new file mode 100644
index 0000000..0dfc778
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-cadence-host.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe host controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-cadence.h"
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @dev: pointer to PCIe device
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ *           configuration space accesses
+ * @bus_range: first/last buses behind the PCIe host controller
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ *            single function at a time
+ * @max_regions: maximum number of regions supported by the hardware
+ * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
+ *                translation (nbits sets into the "no BAR match" register)
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ */
+struct cdns_pcie_rc {
+	struct cdns_pcie	pcie;
+	struct device		*dev;
+	struct resource		*cfg_res;
+	struct resource		*bus_range;
+	void __iomem		*cfg_base;
+	u32			max_regions;
+	u32			no_bar_nbits;
+	u16			vendor_id;
+	u16			device_id;
+};
+
+static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+				      int where)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+	struct cdns_pcie *pcie = &rc->pcie;
+	unsigned int busn = bus->number;
+	u32 addr0, desc0;
+
+	if (busn == rc->bus_range->start) {
+		/*
+		 * Only the root port (devfn == 0) is connected to this bus.
+		 * All other PCI devices are behind some bridge hence on another
+		 * bus.
+		 */
+		if (devfn)
+			return NULL;
+
+		return pcie->reg_base + (where & 0xfff);
+	}
+	/* Check that the link is up */
+	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
+		return NULL;
+	/* Clear AXI link-down status */
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
+
+	/* Update Output registers for AXI region 0. */
+	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
+		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
+		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
+
+	/* Configuration Type 0 or Type 1 access. */
+	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
+	/*
+	 * The bus number was already set, once and for all, in desc1 by
+	 * cdns_pcie_host_init_address_translation().
+	 */
+	if (busn == rc->bus_range->start + 1)
+		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
+	else
+		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);
+
+	return rc->cfg_base + (where & 0xfff);
+}
+
+static struct pci_ops cdns_pcie_host_ops = {
+	.map_bus	= cdns_pci_map_bus,
+	.read		= pci_generic_config_read,
+	.write		= pci_generic_config_write,
+};
+
+static const struct of_device_id cdns_pcie_host_of_match[] = {
+	{ .compatible = "cdns,cdns-pcie-host" },
+
+	{ },
+};
+
+static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
+{
+	struct cdns_pcie *pcie = &rc->pcie;
+	u32 value, ctrl;
+	u32 id;
+
+	/*
+	 * Set the root complex BAR configuration register:
+	 * - disable both BAR0 and BAR1.
+	 * - enable Prefetchable Memory Base and Limit registers in type 1
+	 *   config space (64 bits).
+	 * - enable IO Base and Limit registers in type 1 config
+	 *   space (32 bits).
+	 */
+	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
+	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+
+	/* Set root port configuration space */
+	if (rc->vendor_id != 0xffff) {
+		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
+			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
+		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+	}
+
+	if (rc->device_id != 0xffff)
+		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
+
+	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
+	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
+	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+	return 0;
+}
+
+static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
+{
+	struct cdns_pcie *pcie = &rc->pcie;
+	struct resource *cfg_res = rc->cfg_res;
+	struct resource *mem_res = pcie->mem_res;
+	struct resource *bus_range = rc->bus_range;
+	struct device *dev = rc->dev;
+	struct device_node *np = dev->of_node;
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	u32 addr0, addr1, desc1;
+	u64 cpu_addr;
+	int r, err;
+
+	/*
+	 * Reserve region 0 for PCI configuration space accesses:
+	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
+	 * cdns_pci_map_bus(); the other region registers are set here once
+	 * and for all.
+	 */
+	addr1 = 0; /* Should be programmed to zero. */
+	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
+
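+	/*
+	 * CPU addresses are programmed relative to the start of the
+	 * controller's AXI memory window.
+	 */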
+	cpu_addr = cfg_res->start - mem_res->start;
+	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
+		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
+	addr1 = upper_32_bits(cpu_addr);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);
+
+	err = of_pci_range_parser_init(&parser, np);
+	if (err)
+		return err;
+
+	r = 1;
+	for_each_of_pci_range(&parser, &range) {
+		bool is_io;
+
+		if (r >= rc->max_regions)
+			break;
+
+		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
+			is_io = false;
+		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
+			is_io = true;
+		else
+			continue;
+
+		cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
+					      range.cpu_addr,
+					      range.pci_addr,
+					      range.size);
+		r++;
+	}
+
+	/*
+	 * Set Root Port no BAR match Inbound Translation registers:
+	 * needed for MSI and DMA.
+	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
+	 * inbound translation registers.
+	 */
+	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
+	addr1 = 0;
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);
+
+	return 0;
+}
+
+static int cdns_pcie_host_init(struct device *dev,
+			       struct list_head *resources,
+			       struct cdns_pcie_rc *rc)
+{
+	struct resource *bus_range = NULL;
+	int err;
+
+	/* Parse our PCI ranges and request their resources */
+	err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+	if (err)
+		return err;
+
+	rc->bus_range = bus_range;
+	rc->pcie.bus = bus_range->start;
+
+	err = cdns_pcie_host_init_root_port(rc);
+	if (err)
+		goto err_out;
+
+	err = cdns_pcie_host_init_address_translation(rc);
+	if (err)
+		goto err_out;
+
+	return 0;
+
+ err_out:
+	pci_free_resource_list(resources);
+	return err;
+}
+
+static int cdns_pcie_host_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct pci_host_bridge *bridge;
+	struct list_head resources;
+	struct cdns_pcie_rc *rc;
+	struct cdns_pcie *pcie;
+	struct resource *res;
+	int ret;
+	int phy_count;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+	if (!bridge)
+		return -ENOMEM;
+
+	rc = pci_host_bridge_priv(bridge);
+	rc->dev = dev;
+
+	pcie = &rc->pcie;
+	pcie->is_rc = true;
+
+	rc->max_regions = 32;
+	of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);
+
+	rc->no_bar_nbits = 32;
+	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
+
+	rc->vendor_id = 0xffff;
+	of_property_read_u16(np, "vendor-id", &rc->vendor_id);
+
+	rc->device_id = 0xffff;
+	of_property_read_u16(np, "device-id", &rc->device_id);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
+	pcie->reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->reg_base)) {
+		dev_err(dev, "missing \"reg\"\n");
+		return PTR_ERR(pcie->reg_base);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(rc->cfg_base)) {
+		dev_err(dev, "missing \"cfg\"\n");
+		return PTR_ERR(rc->cfg_base);
+	}
+	rc->cfg_res = res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+	if (!res) {
+		dev_err(dev, "missing \"mem\"\n");
+		return -EINVAL;
+	}
+	pcie->mem_res = res;
+
+	ret = cdns_pcie_init_phy(dev, pcie);
+	if (ret) {
+		dev_err(dev, "failed to init phy\n");
+		return ret;
+	}
+	platform_set_drvdata(pdev, pcie);
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		dev_err(dev, "pm_runtime_get_sync() failed\n");
+		goto err_get_sync;
+	}
+
+	ret = cdns_pcie_host_init(dev, &resources, rc);
+	if (ret)
+		goto err_init;
+
+	list_splice_init(&resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->busnr = pcie->bus;
+	bridge->ops = &cdns_pcie_host_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_host_probe(bridge);
+	if (ret < 0)
+		goto err_host_probe;
+
+	return 0;
+
+ err_host_probe:
+	pci_free_resource_list(&resources);
+
+ err_init:
+	pm_runtime_put_sync(dev);
+
+ err_get_sync:
+	pm_runtime_disable(dev);
+	cdns_pcie_disable_phy(pcie);
+	phy_count = pcie->phy_count;
+	while (phy_count--)
+		device_link_del(pcie->link[phy_count]);
+
+	return ret;
+}
+
+static void cdns_pcie_shutdown(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_runtime_put_sync(dev);
+	if (ret < 0)
+		dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+	pm_runtime_disable(dev);
+	cdns_pcie_disable_phy(pcie);
+}
+
+static struct platform_driver cdns_pcie_host_driver = {
+	.driver = {
+		.name = "cdns-pcie-host",
+		.of_match_table = cdns_pcie_host_of_match,
+		.pm	= &cdns_pcie_pm_ops,
+	},
+	.probe = cdns_pcie_host_probe,
+	.shutdown = cdns_pcie_shutdown,
+};
+builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/marvell/linux/drivers/pci/controller/pcie-cadence.c b/marvell/linux/drivers/pci/controller/pcie-cadence.c
new file mode 100644
index 0000000..cd795f6
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-cadence.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#include <linux/kernel.h>
+
+#include "pcie-cadence.h"
+
+void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
+				   u32 r, bool is_io,
+				   u64 cpu_addr, u64 pci_addr, size_t size)
+{
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not suited
+	 * for 64-bit values on 32-bit platforms.
+	 */
+	u64 sz = 1ULL << fls64(size - 1);
+	int nbits = ilog2(sz);
+	u32 addr0, addr1, desc0, desc1;
+
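+	/* The smallest supported translation window is 256 bytes (2^8). */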
+	if (nbits < 8)
+		nbits = 8;
+
+	/* Set the PCI address */
+	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
+		(lower_32_bits(pci_addr) & GENMASK(31, 8));
+	addr1 = upper_32_bits(pci_addr);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
+
+	/* Set the PCIe header descriptor */
+	if (is_io)
+		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
+	else
+		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
+	desc1 = 0;
+
+	/*
+	 * Whether or not Bit [23] is set in the DESC0 register of the
+	 * outbound PCIe descriptor, the PCI function number must always be
+	 * programmed into Bits [26:24] of DESC0.
+	 *
+	 * In Root Complex mode, the function number is always 0, but in
+	 * Endpoint mode the PCIe controller may support more than one
+	 * function, so the function number needs to be set properly in the
+	 * outbound PCIe descriptor.
+	 *
+	 * Besides, setting Bit [23] is mandatory in Root Complex mode: the
+	 * driver must then provide the bus number in Bits [7:0] of DESC1 and
+	 * the device number in Bits [31:27] of DESC0. Like the function
+	 * number, the device number is always 0 in Root Complex mode.
+	 *
+	 * In Endpoint mode, however, we can clear Bit [23] of DESC0 so that
+	 * the PCIe controller uses the captured values for the bus and
+	 * device numbers.
+	 */
+	if (pcie->is_rc) {
+		/* The device and function numbers are always 0. */
+		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
+		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
+	} else {
+		/*
+		 * Use captured values for bus and device numbers but still
+		 * need to set the function number.
+		 */
+		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
+	}
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
+
+	/* Set the CPU address */
+	cpu_addr -= pcie->mem_res->start;
+	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
+		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
+	addr1 = upper_32_bits(cpu_addr);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
+}
+
+void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
+						  u32 r, u64 cpu_addr)
+{
+	u32 addr0, addr1, desc0, desc1;
+
+	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
+	desc1 = 0;
+
+	/* See cdns_pcie_set_outbound_region() comments above. */
+	if (pcie->is_rc) {
+		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
+			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
+		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus);
+	} else {
+		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
+	}
+
+	/* Set the CPU address */
+	cpu_addr -= pcie->mem_res->start;
+	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
+		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
+	addr1 = upper_32_bits(cpu_addr);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
+}
+
+void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
+{
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
+
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
+	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
+}
+
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
+{
+	int i = pcie->phy_count;
+
+	while (i--) {
+		phy_power_off(pcie->phy[i]);
+		phy_exit(pcie->phy[i]);
+	}
+}
+
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < pcie->phy_count; i++) {
+		ret = phy_init(pcie->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(pcie->phy[i]);
+		if (ret < 0) {
+			phy_exit(pcie->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(pcie->phy[i]);
+		phy_exit(pcie->phy[i]);
+	}
+
+	return ret;
+}
+
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
+{
+	struct device_node *np = dev->of_node;
+	int phy_count;
+	struct phy **phy;
+	struct device_link **link;
+	int i;
+	int ret;
+	const char *name;
+
+	phy_count = of_property_count_strings(np, "phy-names");
+	if (phy_count < 1) {
+		dev_err(dev, "no phy-names; PHY will not be initialized\n");
+		pcie->phy_count = 0;
+		return 0;
+	}
+
+	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	for (i = 0; i < phy_count; i++) {
+		of_property_read_string_index(np, "phy-names", i, &name);
+		phy[i] = devm_phy_get(dev, name);
+		if (IS_ERR(phy[i])) {
+			ret = PTR_ERR(phy[i]);
+			goto err_phy;
+		}
+		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+		if (!link[i]) {
+			devm_phy_put(dev, phy[i]);
+			ret = -EINVAL;
+			goto err_phy;
+		}
+	}
+
+	pcie->phy_count = phy_count;
+	pcie->phy = phy;
+	pcie->link = link;
+
+	ret = cdns_pcie_enable_phy(pcie);
+	if (ret)
+		goto err_phy;
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		device_link_del(link[i]);
+		devm_phy_put(dev, phy[i]);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cdns_pcie_suspend_noirq(struct device *dev)
+{
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+	cdns_pcie_disable_phy(pcie);
+
+	return 0;
+}
+
+static int cdns_pcie_resume_noirq(struct device *dev)
+{
+	struct cdns_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	ret = cdns_pcie_enable_phy(pcie);
+	if (ret) {
+		dev_err(dev, "failed to enable phy\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+const struct dev_pm_ops cdns_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
+				      cdns_pcie_resume_noirq)
+};
diff --git a/marvell/linux/drivers/pci/controller/pcie-cadence.h b/marvell/linux/drivers/pci/controller/pcie-cadence.h
new file mode 100644
index 0000000..ae6bf2a
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-cadence.h
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Cadence
+// Cadence PCIe controller driver.
+// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+
+#ifndef _PCIE_CADENCE_H
+#define _PCIE_CADENCE_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+
+/*
+ * Local Management Registers
+ */
+#define CDNS_PCIE_LM_BASE	0x00100000
+
+/* Vendor ID Register */
+#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
+#define  CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
+#define  CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
+#define  CDNS_PCIE_LM_ID_VENDOR(vid) \
+	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
+#define  CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
+#define  CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
+#define  CDNS_PCIE_LM_ID_SUBSYS(sub) \
+	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
+
+/* Root Port Requestor ID Register */
+#define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228)
+#define  CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0)
+#define  CDNS_PCIE_LM_RP_RID_SHIFT	0
+#define  CDNS_PCIE_LM_RP_RID_(rid) \
+	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
+
+/* Endpoint Bus and Device Number Register */
+#define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c)
+#define  CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0)
+#define  CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0
+#define  CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8)
+#define  CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8
+
+/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
+	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
+	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+	(GENMASK(4, 0) << ((b) * 8))
+#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+	(GENMASK(7, 5) << ((b) * 8))
+#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0)
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK		GENMASK(8, 6)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK		GENMASK(16, 14)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0
+#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS		0
+#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20)
+#define  CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE		BIT(31)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define  CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0
+#define  CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS		0x1
+#define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4
+#define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
+#define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
+#define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
+
+
+/*
+ * Endpoint Function Registers (PCI configuration space for endpoint functions)
+ */
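+/* Each function's registers occupy a dedicated 4 KB window. */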
+#define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
+
+#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
+
+/*
+ * Root Port Registers (PCI configuration space for the root port function)
+ */
+#define CDNS_PCIE_RP_BASE	0x00200000
+
+
+/*
+ * Address Translation Registers
+ */
+#define CDNS_PCIE_AT_BASE	0x00400000
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
+#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
+#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
+#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+
+/* Region r Outbound PCIe Descriptor Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
+	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK		GENMASK(3, 0)
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd
+/* Bit 23 MUST be set in RC mode. */
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
+#define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+
+/* Region r Outbound PCIe Descriptor Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_DESC1(r)	\
+	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
+#define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
+	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
+#define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
+	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
+#define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
+#define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
+	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
+enum cdns_pcie_rp_bar {
+	RP_BAR0,
+	RP_BAR1,
+	RP_NO_BAR
+};
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+
+/* Normal/Vendor specific message access: offset inside some outbound region */
+#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5)
+#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
+	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
+#define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8)
+#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
+	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
+#define CDNS_PCIE_MSG_NO_DATA			BIT(16)
+
+enum cdns_pcie_msg_code {
+	MSG_CODE_ASSERT_INTA	= 0x20,
+	MSG_CODE_ASSERT_INTB	= 0x21,
+	MSG_CODE_ASSERT_INTC	= 0x22,
+	MSG_CODE_ASSERT_INTD	= 0x23,
+	MSG_CODE_DEASSERT_INTA	= 0x24,
+	MSG_CODE_DEASSERT_INTB	= 0x25,
+	MSG_CODE_DEASSERT_INTC	= 0x26,
+	MSG_CODE_DEASSERT_INTD	= 0x27,
+};
+
+enum cdns_pcie_msg_routing {
+	/* Route to Root Complex */
+	MSG_ROUTING_TO_RC,
+
+	/* Use Address Routing */
+	MSG_ROUTING_BY_ADDR,
+
+	/* Use ID Routing */
+	MSG_ROUTING_BY_ID,
+
+	/* Route as Broadcast Message from Root Complex */
+	MSG_ROUTING_BCAST,
+
+	/* Local message; terminate at receiver (INTx messages) */
+	MSG_ROUTING_LOCAL,
+
+	/* Gather & route to Root Complex (PME_TO_Ack message) */
+	MSG_ROUTING_GATHER,
+};
+
+/**
+ * struct cdns_pcie - private data for Cadence PCIe controller drivers
+ * @reg_base: IO mapped register base
+ * @mem_res: start/end offsets in the physical system memory to map PCI accesses
+ * @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint.
+ * @bus: In Root Complex mode, the bus number
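+ * @phy_count: number of supported PHY devices
+ * @phy: list of pointers to specific PHY control blocks
+ * @link: list of pointers to corresponding device link representations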
+ */
+struct cdns_pcie {
+	void __iomem		*reg_base;
+	struct resource		*mem_res;
+	bool			is_rc;
+	u8			bus;
+	int			phy_count;
+	struct phy		**phy;
+	struct device_link	**link;
+};
+
+/* Register access */
+static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
+{
+	writeb(value, pcie->reg_base + reg);
+}
+
+static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
+{
+	writew(value, pcie->reg_base + reg);
+}
+
+static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
+{
+	writel(value, pcie->reg_base + reg);
+}
+
+static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
+{
+	return readl(pcie->reg_base + reg);
+}
+
+/* Root Port register access */
+static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
+				       u32 reg, u8 value)
+{
+	writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+}
+
+static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
+				       u32 reg, u16 value)
+{
+	writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+}
+
+/* Endpoint Function register access */
+static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
+					  u32 reg, u8 value)
+{
+	writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
+					  u32 reg, u16 value)
+{
+	writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
+					  u32 reg, u32 value)
+{
+	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+	return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+	return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
+				   u32 r, bool is_io,
+				   u64 cpu_addr, u64 pci_addr, size_t size);
+
+void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
+						  u32 r, u64 cpu_addr);
+
+void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
+void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+extern const struct dev_pm_ops cdns_pcie_pm_ops;
+
+#endif /* _PCIE_CADENCE_H */
diff --git a/marvell/linux/drivers/pci/controller/pcie-falcon.c b/marvell/linux/drivers/pci/controller/pcie-falcon.c
new file mode 100644
index 0000000..a165ea5
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-falcon.c
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ASR PCIe host controller driver.
+ *
+ * Copyright (c) 2021 ASRMicro Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/pm_qos.h>
+#include <linux/cputype.h>
+#include <soc/asr/regs-addr.h>
+#include "../pci.h"
+#include "pcie-falcon.h"
+
+/* Time for delay */
+#define REF_PERST_MIN		20000
+#define REF_PERST_MAX		25000
+#define PERST_ACCESS_MIN	10000
+#define PERST_ACCESS_MAX	12000
+
+struct falcon_pcie_port;
+
+struct falcon_pcie_soc {
+	struct pci_ops *ops;
+};
+
+struct falcon_pcie_port {
+	void __iomem *base;
+	void __iomem *phy_base;
+	struct list_head list;
+	struct falcon_pcie *pcie;
+	struct clk *axi_ck;
+	struct phy *phy;
+	u32 slot;
+	int irq;
+	struct mutex lock;
+	s32  lpm_qos;
+	int  gpio_reset;
+	bool suspended;
+};
+
+#ifdef CONFIG_PCI_MSI
+#define INT_PCI_MSI_NR		32
+struct falcon_msi {
+	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+	struct irq_domain *domain;
+	struct msi_controller chip;
+	unsigned long pages;
+	struct mutex lock;
+	int irq;
+};
+#endif
+struct falcon_pcie {
+	struct device *dev;
+	struct phy *phy;
+	struct falcon_pcie_port *port;
+	struct resource mem;
+	const struct falcon_pcie_soc *soc;
+	unsigned int busnr;
+	struct pci_host_bridge *host;
+	struct pm_qos_request   qos_idle;
+#ifdef CONFIG_PCI_MSI
+	struct falcon_msi msi;
+#endif
+};
+#ifdef CONFIG_PCI_MSI
+static inline struct falcon_msi *to_falcon_msi(struct msi_controller *chip)
+{
+	return container_of(chip, struct falcon_msi, chip);
+}
+#endif
+
+static void falcon_reset(struct falcon_pcie_port *port, u8 on)
+{
+	int ret;
+
+	if (!gpio_request(port->gpio_reset, "pcie_perst")) {
+		usleep_range(REF_PERST_MIN, REF_PERST_MAX);
+		ret = gpio_direction_output(port->gpio_reset, on);
+		gpio_free(port->gpio_reset);
+		if (ret)
+			pr_err("Falcon perst: failed to set gpio%d as output (err %d)\n",
+					port->gpio_reset, ret);
+		usleep_range(PERST_ACCESS_MIN, PERST_ACCESS_MAX);
+	} else {
+		pr_err("Falcon perst: failed to request gpio%d\n",
+				port->gpio_reset);
+	}
+}
+
+static void falcon_perst_assert(struct falcon_pcie_port *port)
+{
+	falcon_reset(port, 1);
+}
+
+static void falcon_perst_deassert(struct falcon_pcie_port *port)
+{
+	falcon_reset(port, 0);
+}
+
+static int cfg_read(void __iomem *addr, int where, int size, u32 *val)
+{
+	*val = readl(addr);
+	if (size == 1)
+		*val = (*val >> (8 * (where & 3))) & 0xff;
+	else if (size == 2)
+		*val = (*val >> (8 * (where & 3))) & 0xffff;
+	else if (size != 4)
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int cfg_write(void __iomem *addr, int where, int size, u32 val)
+{
+	if (size == 4)
+		writel(val, addr);
+	else if (size == 2)
+		writew(val, addr + (where & 2));
+	else if (size == 1)
+		writeb(val, addr + (where & 3));
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
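+
+/*
+ * Example of the sub-word handling above: a 2-byte read at where = 0x06
+ * fetches the dword at the aligned address and returns bits [31:16],
+ * while a 2-byte write at the same offset lands on the byte lane at
+ * addr + 2.
+ */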
+
+static void falcon_pcie_subsys_powerdown(struct falcon_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+}
+
+static void falcon_pcie_port_free(struct falcon_pcie_port *port)
+{
+	struct falcon_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
+
+	devm_iounmap(dev, port->base);
+	list_del(&port->list);
+	devm_kfree(dev, port);
+}
+
+static void falcon_pcie_put_resources(struct falcon_pcie *pcie)
+{
+	struct falcon_pcie_port *port = pcie->port;
+
+	phy_power_off(port->phy);
+	phy_exit(port->phy);
+	clk_disable_unprepare(port->axi_ck);
+	falcon_pcie_port_free(port);
+
+	falcon_pcie_subsys_powerdown(pcie);
+}
+
+static int falcon_pcie_hw_rd_cfg(struct falcon_pcie_port *port, u32 bus, u32 devfn,
+			      int where, int size, u32 *val)
+{
+	u32 value;
+	int ret = PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (port->suspended)
+		return -1;
+
+	mutex_lock(&port->lock);
+	if (PCI_FUNC(devfn) == 0) {
+		value = readl(port->base + PCIE_CFGNUM);
+		if (bus == 1) {
+			/* select the config space of the endpoint behind the bridge */
+			writel(value | (0x1 << 8), port->base + PCIE_CFGNUM);
+			ret = cfg_read(port->base + FALCON_PCIE_CONFIG_OFFSET
+					+ (where & ~0x3), where, size, val);
+		} else if (bus == 0) {
+			/* select the root port's own config space */
+			writel(value & ~(0x1 << 8), port->base + PCIE_CFGNUM);
+			ret = cfg_read(port->base + FALCON_PCIE_CONFIG_OFFSET
+					+ (where & ~0x3), where, size, val);
+		}
+	}
+	mutex_unlock(&port->lock);
+
+	return ret;
+}
+
+static int falcon_pcie_hw_wr_cfg(struct falcon_pcie_port *port, u32 bus, u32 devfn,
+			      int where, int size, u32 val)
+{
+	int ret = PCIBIOS_SUCCESSFUL;
+	u32 value;
+
+	if (port->suspended)
+		return -1;
+
+	mutex_lock(&port->lock);
+	/*
+	 * Only function 0 on bus 0 (the root port) and bus 1 (the endpoint)
+	 * is reachable.  Writes to the bridge BARs are dropped so that the
+	 * BAR0 doorbell programmed in falcon_pcie_enable_port() is not
+	 * overwritten; note that 'where' is a config-space offset while the
+	 * CFG_BAR*_REG constants already include FALCON_PCIE_CONFIG_OFFSET.
+	 */
+	if (PCI_FUNC(devfn) == 0 && (bus == 0 || bus == 1) &&
+	    (where + FALCON_PCIE_CONFIG_OFFSET != CFG_BAR0_REG) &&
+	    (where + FALCON_PCIE_CONFIG_OFFSET != CFG_BAR1_REG)) {
+		value = readl(port->base + PCIE_CFGNUM);
+		if (bus == 1)
+			writel(value | (0x1 << 8), port->base + PCIE_CFGNUM);
+		else
+			writel(value & ~(0x1 << 8), port->base + PCIE_CFGNUM);
+		ret = cfg_write(port->base + FALCON_PCIE_CONFIG_OFFSET
+				+ (where & ~0x3), where, size, val);
+	}
+	mutex_unlock(&port->lock);
+
+	return ret;
+}
+
+static struct falcon_pcie_port *falcon_pcie_find_port(struct pci_bus *bus,
+						unsigned int devfn)
+{
+	struct falcon_pcie *pcie = bus->sysdata;
+	struct falcon_pcie_port *port = pcie->port;
+	struct pci_dev *dev = NULL;
+	/*
+	 * Walk the bus hierarchy to get the devfn value
+	 * of the port in the root bus.
+	 */
+	while (bus && bus->number) {
+		dev = bus->self;
+		bus = dev->bus;
+		devfn = dev->devfn;
+	}
+
+	if (port->slot == PCI_SLOT(devfn))
+		return port;
+
+	return NULL;
+}
+
+static int falcon_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *val)
+{
+	struct falcon_pcie_port *port;
+	u32 bn = bus->number;
+	int ret;
+
+	port = falcon_pcie_find_port(bus, devfn);
+	if (!port) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+	ret = falcon_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
+	if (ret)
+		*val = ~0;
+
+	return ret;
+}
+
+static int falcon_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 val)
+{
+	struct falcon_pcie_port *port;
+	u32 bn = bus->number;
+
+	port = falcon_pcie_find_port(bus, devfn);
+	if (!port)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return falcon_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
+}
+
+static struct pci_ops falcon_pcie_ops = {
+	.read  = falcon_pcie_config_read,
+	.write = falcon_pcie_config_write,
+};
+
+#ifndef CONFIG_PCI_MSI
+static irqreturn_t falcon_pcie_irq_handler(int irq, void *arg)
+{
+	struct falcon_pcie_port *pp = arg;
+	u32 val;
+
+	val = readl(pp->base + XR3PCI_LOCAL_INT_STATUS);
+	if (val != 0) {
+		pm_wakeup_event(pp->pcie->dev, 2000);
+		val = readl(pp->base + XR3PCI_LOCAL_INT_STATUS);
+		writel(val, pp->base + XR3PCI_LOCAL_INT_STATUS);
+	}
+	return IRQ_HANDLED;
+}
+#endif
+
+#ifdef CONFIG_PCI_MSI
+static int falcon_msi_alloc(struct falcon_msi *chip)
+{
+	int msi;
+
+	mutex_lock(&chip->lock);
+
+	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+	if (msi < INT_PCI_MSI_NR)
+		set_bit(msi, chip->used);
+	else
+		msi = -ENOSPC;
+
+	mutex_unlock(&chip->lock);
+
+	return msi;
+}
+
+static int falcon_msi_alloc_region(struct falcon_msi *chip, int no_irqs)
+{
+	int msi;
+
+	mutex_lock(&chip->lock);
+	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
+				      order_base_2(no_irqs));
+	mutex_unlock(&chip->lock);
+
+	return msi;
+}
+
+static void falcon_msi_free(struct falcon_msi *chip, unsigned long irq)
+{
+	mutex_lock(&chip->lock);
+	clear_bit(irq, chip->used);
+	mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t falcon_pcie_msi_irq(int irq, void *data)
+{
+	struct falcon_pcie *pcie = data;
+	struct falcon_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
+	unsigned int reg, val;
+
+	val = readl(pcie->port->base + XR3PCI_LOCAL_INT_STATUS);
+	if (val != 0)
+		pm_wakeup_event(pcie->dev, 2000);
+
+	reg = (u32)readl(pcie->port->base + XR3PCI_MSI_INT_STATUS);
+	if (!reg)
+		return IRQ_NONE;
+
+	while (reg) {
+		unsigned int index = ffs(reg) - 1;	/* reg != 0 here, per the loop condition */
+		unsigned int msi_irq;
+
+		/* clear the interrupt */
+		writel(1 << index, pcie->port->base + XR3PCI_MSI_INT_STATUS);
+
+		msi_irq = irq_find_mapping(msi->domain, index);
+		if (msi_irq) {
+			if (test_bit(index, msi->used))
+				generic_handle_irq(msi_irq);
+			else
+				dev_info(dev, "unhandled MSI\n");
+		} else {
+			/* Unknown MSI, just clear it */
+			dev_dbg(dev, "unexpected MSI\n");
+		}
+
+		/* see if there's any more pending in this vector */
+		reg = (u32)readl(pcie->port->base + XR3PCI_MSI_INT_STATUS);
+	}
+	writel(val, pcie->port->base + XR3PCI_LOCAL_INT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static int falcon_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+			      struct msi_desc *desc)
+{
+	struct falcon_msi *msi = to_falcon_msi(chip);
+	struct falcon_pcie *pcie = container_of(chip, struct falcon_pcie, msi.chip);
+	struct msi_msg msg;
+	unsigned int irq;
+	int hwirq;
+	u64 msi_target;
+
+	hwirq = falcon_msi_alloc(msi);
+	if (hwirq < 0)
+		return hwirq;
+
+	irq = irq_find_mapping(msi->domain, hwirq);
+	if (!irq) {
+		falcon_msi_free(msi, hwirq);
+		return -EINVAL;
+	}
+
+	irq_set_msi_desc(irq, desc);
+
+	msi_target = (u64)readl(pcie->port->base + IMSI_ADDR);
+	/* the EP must write to BAR0_ADDR + IMSI_ADDR to trigger an MSI */
+	msi_target |= BAR0_ADDR;
+
+	msg.address_lo = (u32)(msi_target & 0xffffffff);
+	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+	msg.data = hwirq;
+
+	pci_write_msi_msg(irq, &msg);
+
+	return 0;
+}
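+
+/*
+ * Note on the doorbell address above: IMSI_ADDR holds the MSI offset
+ * published by the root port; ORed with BAR0_ADDR (0xF0000000) it becomes
+ * the bus address an endpoint writes to raise an MSI.  E.g. if IMSI_ADDR
+ * reads back 0x100, the doorbell would be 0xF0000100 (illustrative
+ * value), with msg.data carrying the hardware IRQ number.
+ */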
+
+static int falcon_msi_setup_irqs(struct msi_controller *chip,
+			       struct pci_dev *pdev, int nvec, int type)
+{
+	struct falcon_pcie *pcie = container_of(chip, struct falcon_pcie, msi.chip);
+	struct falcon_msi *msi = to_falcon_msi(chip);
+	struct msi_desc *desc;
+	struct msi_msg msg;
+	u64 msi_target;
+	unsigned int irq;
+	int hwirq;
+	int i;
+
+	/* MSI-X interrupts are not supported */
+	if (type == PCI_CAP_ID_MSIX)
+		return -EINVAL;
+
+	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+	hwirq = falcon_msi_alloc_region(msi, nvec);
+	if (hwirq < 0)
+		return -ENOSPC;
+
+	irq = irq_find_mapping(msi->domain, hwirq);
+	if (!irq)
+		return -ENOSPC;
+
+	for (i = 0; i < nvec; i++) {
+		/*
+		 * irq_create_mapping() called from falcon_pcie_probe() pre-
+		 * allocates descs, so there is no need to allocate descs here.
+		 * We can therefore assume that if irq_find_mapping() above
+		 * returns non-zero, then the descs are also successfully
+		 * allocated.
+		 */
+		if (irq_set_msi_desc_off(irq, i, desc)) {
+			/* TODO: clear */
+			return -EINVAL;
+		}
+	}
+
+	desc->nvec_used = nvec;
+	desc->msi_attrib.multiple = order_base_2(nvec);
+
+	msi_target = (u64)readl(pcie->port->base + IMSI_ADDR);
+	/* the EP must write to BAR0_ADDR + IMSI_ADDR to trigger an MSI */
+	msi_target |= BAR0_ADDR;
+
+	msg.address_lo = (u32)(msi_target & 0xffffffff);
+	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+	msg.data = hwirq;
+
+	pci_write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static void falcon_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+	struct falcon_msi *msi = to_falcon_msi(chip);
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	falcon_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip falcon_msi_irq_chip = {
+	.name = "PCI-MSI",
+	.irq_enable = pci_msi_unmask_irq,
+	.irq_disable = pci_msi_mask_irq,
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static int falcon_msi_map(struct irq_domain *domain, unsigned int irq,
+			irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &falcon_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = falcon_msi_map,
+};
+
+static void falcon_pcie_unmap_msi(struct falcon_pcie *pcie)
+{
+	struct falcon_msi *msi = &pcie->msi;
+	int i, irq;
+
+	for (i = 0; i < INT_PCI_MSI_NR; i++) {
+		irq = irq_find_mapping(msi->domain, i);
+		if (irq > 0)
+			irq_dispose_mapping(irq);
+	}
+
+	irq_domain_remove(msi->domain);
+}
+
+static int falcon_pcie_enable_msi(struct falcon_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct falcon_msi *msi = &pcie->msi;
+	int err, i;
+
+	mutex_init(&msi->lock);
+
+	msi->chip.dev = dev;
+	msi->chip.setup_irq = falcon_msi_setup_irq;
+	msi->chip.setup_irqs = falcon_msi_setup_irqs;
+	msi->chip.teardown_irq = falcon_msi_teardown_irq;
+
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
+					    &msi_domain_ops, &msi->chip);
+	if (!msi->domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < INT_PCI_MSI_NR; i++)
+		irq_create_mapping(msi->domain, i);
+
+	err = devm_request_irq(dev, pcie->port->irq + 16, falcon_pcie_msi_irq,
+			       IRQF_SHARED, falcon_msi_irq_chip.name, pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to request IRQ: %d\n", err);
+		goto err;
+	}
+
+	/* clear all interrupts */
+	writel(0xffffffff, pcie->port->base + XR3PCI_LOCAL_INT_STATUS);
+	/* enable all MSI interrupts */
+	writel(XR3PCI_INT_MSI, pcie->port->base + XR3PCI_LOCAL_INT_MASK);
+
+	return 0;
+
+err:
+	falcon_pcie_unmap_msi(pcie);
+	return err;
+}
+
+#endif
+
+#ifndef CONFIG_PCI_MSI
+static int falcon_enable_interrupt(struct falcon_pcie_port *port)
+{
+	/* clear all interrupts */
+	writel(0xffffffff, port->base + XR3PCI_LOCAL_INT_STATUS);
+	/* Enable legacy interrupts */
+	writel(XR3PCI_INT_INTx, port->base + XR3PCI_LOCAL_INT_MASK);
+
+	return 0;
+}
+#endif
+
+static void falcon_update_atr_entry(void __iomem *base,
+			resource_size_t src_addr, resource_size_t trsl_addr,
+			int trsl_param, int window_size)
+{
+	/* bit 0: enable entry, bits 1-6: ATR window size (2^window_size + 1) */
+	writel(src_addr | (window_size<<1) | 0x1, base + XR3PCI_ATR_SRC_ADDR_LOW);
+	writel(0, base + XR3PCI_ATR_SRC_ADDR_HIGH);
+	writel(trsl_addr, base + XR3PCI_ATR_TRSL_ADDR_LOW);
+	writel(0, base + XR3PCI_ATR_TRSL_ADDR_HIGH);
+	writel(trsl_param, base + XR3PCI_ATR_TRSL_PARAM);
+}
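+
+/*
+ * Worked example for the encoding above: window_size = 0x19 yields a
+ * translated region of 2^(0x19 + 1) = 64 MiB, with the enable bit and
+ * the size field packed into the low bits of XR3PCI_ATR_SRC_ADDR_LOW.
+ */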
+
+static int falcon_pcie_startup_port(struct falcon_pcie_port *port)
+{
+	void __iomem *table_base;
+	resource_size_t src_addr;
+	unsigned long ddr_size_mb = get_num_physpages() >> (20 - PAGE_SHIFT);
+	unsigned long address;
+	int err, i = 0;
+	u32 val;
+
+	/* Address translation from CPU to PCIe */
+	table_base = port->base + XR3PCI_ATR_AXI4_SLV0;
+	address = (unsigned long)port->base;
+	if ((address & 0xfffff) == 0x88000)
+		src_addr = 0xe0000000 + 0x100000;
+	else if ((address & 0xfffff) == 0x8c000)
+		src_addr = 0xd8000000 + 0x100000;
+	else
+		return -ENODEV;
+
+	/* map the slave interface window to PCIe configuration accesses */
+	falcon_update_atr_entry(table_base,
+		src_addr, src_addr, XR3PCI_ATR_TRSLID_PCIE_CONF, 0x19);
+
+	/* map DDR addr range */
+	while (ddr_size_mb >>= 1)
+		i++;
+	falcon_update_atr_entry(port->base + XR3PCI_ATR_PCIE_WIN0 + 0x20,
+		0, 0, XR3PCI_ATR_TRSLID_AXIMEMORY, (20 + i - 1));
+	pr_debug("%s: log(2) of ddr size = %d\n", __func__, i);
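+	/*
+	 * Worked example of the sizing above: with 512 MB of DDR,
+	 * ddr_size_mb halves to zero after i = 9 iterations, so the window
+	 * size parameter is 20 + 9 - 1 = 28, i.e. a 2^(28 + 1) = 512 MB
+	 * window starting at PCI/AXI address 0.
+	 */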
+
+	/* disable the remaining ATR windows */
+	for (i = XR3PCI_ATR_PCIE_WIN0 + 0x40; i < XR3PCI_ATR_AXI4_SLV0; i += 0x20)
+		falcon_update_atr_entry(port->base + i,
+			0, 0, XR3PCI_ATR_TRSLID_AXIMEMORY, 0);
+
+	/* enable I/O space, memory space and bus mastering */
+	val = readl(port->base + CFG_STATUS_COMMAND);
+	writel(val | 0x7, port->base + CFG_STATUS_COMMAND);
+	/* bus numbers: primary = 0, secondary = 1, subordinate = 1 */
+	writel(0x1 << 16 | 0x1 << 8, port->base + CFG_SUBSEC_PRIM);
+#ifdef CONFIG_CPU_ASR1903
+	if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
+		val = readl(port->base + PCIE_CFGCTRL);
+		/* clear the LTSSM disable bit so link training can start */
+		val &= ~(0x1 << 2);
+		writel(val, port->base + PCIE_CFGCTRL);
+	}
+#endif
+	err = readl_poll_timeout_atomic(port->base + XR3PCI_BASIC_STATUS, val,
+					(val & XR3PCI_BS_LINK_MASK), 10, 100 * USEC_PER_MSEC);
+	if (err) {
+		pr_info(DEVICE_NAME ": No link negotiated\n");
+		return -EIO;
+	}
+
+	mdelay(1);
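+	/*
+	 * BASIC_STATUS decode: link width in the low byte, generation in
+	 * bits [11:8], and payload/request sizes encoded as 2^(7 + n)
+	 * bytes, e.g. a field value of 1 means 256 bytes.
+	 */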
+	val = readl(port->base + XR3PCI_BASIC_STATUS);
+	pr_info(DEVICE_NAME " %dx link negotiated (gen %d), maxpayload %d \
+		maxreqsize %d\n",
+		val & XR3PCI_BS_LINK_MASK, (val & XR3PCI_BS_GEN_MASK) >> 8,
+		2 << (6 + ((val & XR3PCI_BS_NEG_PAYLOAD_MASK) >> 24)),
+		2 << (6 + ((val & XR3PCI_BS_NEG_REQSIZE_MASK) >> 28)));
+#ifndef CONFIG_PCI_MSI
+	falcon_enable_interrupt(port);
+#endif
+	return 0;
+}
+
+static int falcon_pcie_enable_port(struct falcon_pcie_port *port)
+{
+	int err;
+	u32 val;
+
+	err = clk_prepare_enable(port->axi_ck);
+	if (err) {
+		pr_info("failed to enable axi_ck%d\n", port->slot);
+		goto err_axi_clk;
+	}
+#ifdef CONFIG_CPU_ASR1903
+	if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
+		val = readl(regs_addr_get_va(REGS_ADDR_APMU) + APMU_PCIE_CLK_RST_CTRL);
+		writel(val | (1<<27), regs_addr_get_va(REGS_ADDR_APMU) + APMU_PCIE_CLK_RST_CTRL);
+	}
+#endif
+	err = phy_init(port->phy);
+	if (err) {
+		pr_info("failed to initialize port%d phy\n", port->slot);
+		goto err_phy_init;
+	}
+	/* assert PERST to hold the endpoint in reset while configuring */
+	falcon_perst_assert(port);
+
+	val = readl(port->base + CFG_DEVICE_CTRL);
+	/* max payload size 256, max read request size 256 */
+	writel((val&0xffff9f3f), port->base + CFG_DEVICE_CTRL);
+	val = readl(port->base + CFG_DEVICE_CAP);
+	writel((val&0xffffff18), port->base + CFG_DEVICE_CAP);
+#ifdef CONFIG_CPU_ASR1903
+	if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
+		writel(0xffffffff, port->base + PCIE_BAR1);
+		val = readl(port->base + PCIE_BAR0);
+		writel(val | 0x4, port->base + PCIE_BAR0);
+	}
+#endif
+	/*
+	 * Set BAR0_REG to BAR0_ADDR to avoid config space overwritten.
+	 * From PLDA spec: "PCI Express BAR0/1 is configured as 64-bit prefetchable memory space of 16 KBytes.
+	 * PCIe read and write requests targeting BAR0/1 are routed to Bridge Configuration space."
+	 */
+	val = readl(port->base + PCIE_CFGNUM);
+	writel((val & (~(0x1<<8))), port->base + PCIE_CFGNUM);
+	writel(0x0, port->base + CFG_BAR1_REG);
+	writel(BAR0_ADDR | 0xc, port->base + CFG_BAR0_REG);
+
+	err = falcon_pcie_startup_port(port);
+	if (err)
+		goto err_startup;
+
+	return 0;
+
+err_startup:
+	phy_exit(port->phy);
+err_phy_init:
+	clk_disable_unprepare(port->axi_ck);
+err_axi_clk:
+	falcon_pcie_port_free(port);
+	return err;
+}
+
+static int falcon_pcie_parse_port(struct falcon_pcie *pcie,
+			       struct device_node *node,
+			       int slot)
+{
+	struct falcon_pcie_port *port;
+	struct resource *regs;
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	const __be32 *prop;
+	unsigned int proplen;
+	int err;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pciephy");
+	port->phy_base = (void *)(regs->start);
+
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pciectrl");
+	port->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(port->base)) {
+		dev_err(dev, "failed to map port%d base\n", slot);
+		return PTR_ERR(port->base);
+	}
+
+	port->axi_ck = of_clk_get_by_name(node, NULL);
+	if (IS_ERR(port->axi_ck)) {
+		dev_err(&pdev->dev, "Error %ld to get pcie clock\n",
+			PTR_ERR(port->axi_ck));
+		return PTR_ERR(port->axi_ck);
+	}
+
+	/* some platforms may use default PHY setting */
+	port->phy = devm_phy_optional_get(dev, "pcie-phy");
+	if (IS_ERR(port->phy))
+		return PTR_ERR(port->phy);
+
+	port->slot = slot;
+	port->pcie = pcie;
+	pcie->port = port;
+	/* the lock must be initialized before any config space access */
+	mutex_init(&port->lock);
+	port->suspended = false;
+	prop = of_get_property(node, "lpm-qos", &proplen);
+	if (prop)
+		port->lpm_qos = be32_to_cpup(prop);
+
+	err = of_property_read_u32(node, "interrupts", &(port->irq));
+	if (err)
+		return err;
+
+#ifndef CONFIG_PCI_MSI
+	port->irq += 16;
+	err = devm_request_irq(dev, port->irq, falcon_pcie_irq_handler,
+				IRQF_SHARED, "falcon-pcie", port);
+	if (err) {
+		dev_err(dev, "failed to request irq %d\n", port->irq);
+		return err;
+	}
+#endif
+	port->gpio_reset = of_get_named_gpio(node, "reset-gpios", 0);
+	if (port->gpio_reset < 0)
+		return -ENODEV;
+
+	INIT_LIST_HEAD(&port->list);
+
+	return 0;
+}
+
+static int falcon_pcie_subsys_powerup(struct falcon_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	return 0;
+}
+
+static int falcon_pcie_setup(struct falcon_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+	struct resource_entry *win, *tmp_win;
+	resource_size_t io_base;
+	int err;
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    windows, &io_base);
+	if (err)
+		return err;
+
+	err = devm_request_pci_bus_resources(dev, windows);
+	if (err < 0)
+		return err;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry_safe(win, tmp_win, windows) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			err = devm_pci_remap_iospace(dev, win->res, io_base);
+			if (err) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 err, win->res);
+				resource_list_destroy_entry(win);
+			}
+			break;
+		case IORESOURCE_MEM:
+			memcpy(&pcie->mem, win->res, sizeof(*win->res));
+			pcie->mem.name = "mem";
+			pcie->busnr = 0;
+			break;
+		case IORESOURCE_BUS:
+			pcie->busnr = win->res->start;
+			break;
+		}
+	}
+
+	err = falcon_pcie_parse_port(pcie, node, 0);
+	if (err)
+		goto error_put_node;
+
+	err = falcon_pcie_subsys_powerup(pcie);
+	if (err)
+		return err;
+
+	err = falcon_pcie_enable_port(pcie->port);
+	if (err)
+		return err;
+#ifdef CONFIG_PCI_MSI
+	err = falcon_pcie_enable_msi(pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to enable MSI support: %d\n", err);
+		goto error_put_node;
+	}
+#endif
+	return 0;
+error_put_node:
+	return err;
+}
+
+static int falcon_pcie_suspend_noirq(struct device *dev);
+static int falcon_pcie_resume_noirq(struct device *dev);
+static ssize_t host_ctrl_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct falcon_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+	u32 on;
+
+	ret = kstrtouint(buf, 0, &on);
+	if (ret)
+		return ret;
+	if (!on) {
+		dev_info(dev, "deasserting perst and suspending pcie controller\n");
+		falcon_pcie_suspend_noirq(dev);
+		falcon_perst_deassert(pcie->port);
+	} else {
+		dev_info(dev, "asserting perst and resuming pcie controller\n");
+		falcon_perst_assert(pcie->port);
+		falcon_pcie_resume_noirq(dev);
+	}
+
+	return count;
+}
+static DEVICE_ATTR_WO(host_ctrl);
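+
+/*
+ * Usage sketch (the exact sysfs path depends on how the platform device
+ * is named in DT):
+ *   echo 0 > /sys/devices/platform/<pcie-node>/host_ctrl   # suspend + release perst
+ *   echo 1 > /sys/devices/platform/<pcie-node>/host_ctrl   # drive perst + resume
+ */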
+
+static struct attribute *falcon_pcie_rc_attrs[] = {
+	&dev_attr_host_ctrl.attr,
+	NULL
+};
+
+static struct attribute_group falcon_pcie_attrgroup = {
+	.attrs	= falcon_pcie_rc_attrs,
+};
+
+static int falcon_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct falcon_pcie *pcie;
+	struct pci_host_bridge *host;
+	int err;
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!host)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(host);
+	pcie->dev = dev;
+	pcie->soc = of_device_get_match_data(dev);
+
+	/* add attributes for host_ctrl */
+	err = sysfs_create_group(&pdev->dev.kobj, &falcon_pcie_attrgroup);
+	if (err)
+		return -ENODEV;
+
+	err = falcon_pcie_setup(pcie);
+	if (err)
+		return -ENODEV;
+
+	platform_set_drvdata(pdev, pcie);
+	host->busnr = pcie->busnr;
+	host->dev.parent = pcie->dev;
+	host->ops = pcie->soc->ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	host->sysdata = pcie;
+	pcie->host = host;
+#ifdef CONFIG_PCI_MSI
+	host->msi = &pcie->msi.chip;
+#endif
+	pm_qos_add_request(&pcie->qos_idle, PM_QOS_CPUIDLE_BLOCK,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+	pcie->qos_idle.name = pdev->name;
+
+	device_init_wakeup(&pdev->dev, 1);
+	err = pci_host_probe(host);
+	pm_qos_update_request(&pcie->qos_idle, pcie->port->lpm_qos);
+
+	if (err)
+		goto put_resources;
+
+	return 0;
+
+put_resources:
+	falcon_pcie_put_resources(pcie);
+
+	return err;
+}
+
+static void falcon_pcie_free_resources(struct falcon_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+
+	pci_free_resource_list(windows);
+}
+
+static int falcon_pcie_remove(struct platform_device *pdev)
+{
+	struct falcon_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	falcon_pcie_free_resources(pcie);
+	falcon_pcie_put_resources(pcie);
+
+	return 0;
+}
+
+static int __maybe_unused falcon_pcie_suspend_noirq(struct device *dev)
+{
+	struct falcon_pcie *pcie = dev_get_drvdata(dev);
+	struct falcon_pcie_port *port = pcie->port;
+
+	if (port->suspended)
+		return 0;
+
+	clk_disable_unprepare(port->axi_ck);
+	phy_power_off(port->phy);
+	phy_exit(port->phy);
+	pm_qos_update_request(&pcie->qos_idle,
+			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+	port->suspended = true;
+
+	return 0;
+}
+
+static int __maybe_unused falcon_pcie_resume_noirq(struct device *dev)
+{
+	struct falcon_pcie *pcie = dev_get_drvdata(dev);
+	struct falcon_pcie_port *port = pcie->port;
+
+	if (!port->suspended)
+		return 0;
+
+	pm_qos_update_request(&pcie->qos_idle, port->lpm_qos);
+	falcon_pcie_enable_port(port);
+	port->suspended = false;
+
+	return 0;
+}
+
+static const struct dev_pm_ops falcon_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(falcon_pcie_suspend_noirq,
+				      falcon_pcie_resume_noirq)
+};
+
+static const struct falcon_pcie_soc falcon_pcie_soc = {
+	.ops = &falcon_pcie_ops,
+};
+
+static const struct of_device_id falcon_pcie_ids[] = {
+	{ .compatible = "asr,falcon-pcie", .data = &falcon_pcie_soc },
+	{},
+};
+
+static struct platform_driver falcon_pcie_driver = {
+	.probe = falcon_pcie_probe,
+	.remove = falcon_pcie_remove,
+	.driver = {
+		.name = "pcie-falcon",
+		.of_match_table = falcon_pcie_ids,
+		.suppress_bind_attrs = true,
+		.pm = &falcon_pcie_pm_ops,
+	},
+};
+
+/* Falcon PCIe driver does not allow module unload */
+static int __init falcon_pcie_init(void)
+{
+	return platform_driver_probe(&falcon_pcie_driver, falcon_pcie_probe);
+}
+device_initcall_sync(falcon_pcie_init);
+
+MODULE_DESCRIPTION("ASR Falcon PCIe Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-falcon.h b/marvell/linux/drivers/pci/controller/pcie-falcon.h
new file mode 100644
index 0000000..3ce0211
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-falcon.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 ASR Ltd.
+ */
+#ifndef __PCIE_FALCON_H__
+#define __PCIE_FALCON_H__
+
+/* Host Bridge Identification */
+#define DEVICE_NAME "ASR Falcon PCIe Host"
+#define FALCON_MSI_IRQS_NUM			32
+
+#define PHY1_BASE	0xd4210400
+#define PHY2_BASE	0xd4220400
+
+#ifdef CONFIG_CPU_ASR1903
+#define APMU_PCIE_CLK_RST_CTRL	0x170
+#else
+#define APMU_PCIE_CLK_RST_CTRL	0x164
+#define APMU_PCIE2_CLK_RST_CTRL	0x16C
+#endif
+#define PCIE_RESET_ASSERT       (~(0xff))
+#define PCIE_RESET_DEASSERT     (0xff)
+
+#define INT_STATUS1				0xc
+#define INT_STATUS2				0x10
+#define RX_MSI_INT				(0x1<<28)
+
+#define PCIE_CFGCTRL				0x84
+#define PCIE_BAR0				0xE4
+#define PCIE_BAR1				0xE8
+#define PCIE_CFGNUM				0x140
+#define IMSI_ADDR				0x190
+
+#define FALCON_PCIE_CONFIG_OFFSET 		0x1000
+
+#define CFG_DEVICE_VENDOR			0x1000
+#define CFG_STATUS_COMMAND			0x1004
+#define CFG_BAR0_REG				0x1010
+#define CFG_BAR1_REG				0x1014
+#define CFG_SUBSEC_PRIM				0x1018
+#define CFG_DEVICE_CAP				0x1084
+#define CFG_DEVICE_CTRL				0x1088
+#define CFG_DEVICE_GEN_SPEED			0x10B0
+
+#define BAR0_ADDR			0xF0000000
+
+#define PUPHY_LTSSM			0x8
+
+#define PUPHY_CLK_CFG		0x408
+#define PUPHY_MODE_CFG		0x40c
+#define PUPHY_PLL_REG1		0x448
+
+#define INTX_SHIFT		16
+
+/* Host Bridge Internal Registers */
+#define XR3PCI_BASIC_STATUS		0x18
+#define    XR3PCI_BS_LINK_MASK		0xff
+#define    XR3PCI_BS_GEN_MASK		(0xf << 8)
+#define    XR3PCI_BS_NEG_PAYLOAD_MASK	(0xf << 24)
+#define    XR3PCI_BS_NEG_REQSIZE_MASK   (0xf << 28)
+
+#define XR3PCI_LOCAL_INT_MASK		0x180
+#define XR3PCI_LOCAL_INT_STATUS		0x184
+#define XR3PCI_HOST_INT_STATUS		0x18C
+#define XR3PCI_MSI_INT_STATUS		0x194
+
+#define XR3PCI_INT_A			(1 << 24)
+#define XR3PCI_INT_B			(1 << 25)
+#define XR3PCI_INT_C			(1 << 26)
+#define XR3PCI_INT_D			(1 << 27)
+#define XR3PCI_INT_INTx			(XR3PCI_INT_A | XR3PCI_INT_B | \
+					XR3PCI_INT_C | XR3PCI_INT_D)
+#define XR3PCI_INT_MSI			(1 << 28)
+
+#define XR3PCI_VIRTCHAN_CREDITS		0x90
+#define XR3PCI_PEX_SPC2				0xd8
+
+/* Address Translation Register */
+#define XR3PCI_ATR_PCIE_WIN0		0x600
+#define XR3PCI_ATR_PCIE_WIN1		0x700
+#define XR3PCI_ATR_AXI4_SLV0		0x800
+
+#define XR3PCI_ATR_TABLE_SIZE		0x20
+#define XR3PCI_ATR_SRC_ADDR_LOW		0x0
+#define XR3PCI_ATR_SRC_ADDR_HIGH	0x4
+#define XR3PCI_ATR_TRSL_ADDR_LOW	0x8
+#define XR3PCI_ATR_TRSL_ADDR_HIGH	0xc
+#define XR3PCI_ATR_TRSL_PARAM		0x10
+
+/* IDs used in the XR3PCI_ATR_TRSL_PARAM */
+#define XR3PCI_ATR_TRSLID_AXIDEVICE	(0x420004)
+#define XR3PCI_ATR_TRSLID_AXIMEMORY	0x4
+#define XR3PCI_ATR_TRSLID_PCIE_CONF	0x0
+#define XR3PCI_ATR_TRSLID_PCIE_IO	(0x020000)
+#define XR3PCI_ATR_TRSLID_PCIE_MEMORY	(0x000000)
+
+#endif /* __PCIE_FALCON_H__ */
diff --git a/marvell/linux/drivers/pci/controller/pcie-iproc-bcma.c b/marvell/linux/drivers/pci/controller/pcie-iproc-bcma.c
new file mode 100644
index 0000000..aa55b06
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-iproc-bcma.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/phy/phy.h>
+#include <linux/bcma/bcma.h>
+#include <linux/ioport.h>
+
+#include "pcie-iproc.h"
+
+
+static void bcma_pcie2_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
+
+static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct iproc_pcie *pcie = dev->sysdata;
+	struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
+
+	return bcma_core_irq(bdev, 5);
+}
+
+static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+{
+	struct device *dev = &bdev->dev;
+	struct iproc_pcie *pcie;
+	LIST_HEAD(resources);
+	struct pci_host_bridge *bridge;
+	int ret;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+
+	pcie->dev = dev;
+
+	pcie->type = IPROC_PCIE_PAXB_BCMA;
+	pcie->base = bdev->io_addr;
+	if (!pcie->base) {
+		dev_err(dev, "no controller registers\n");
+		return -ENOMEM;
+	}
+
+	pcie->base_addr = bdev->addr;
+
+	pcie->mem.start = bdev->addr_s[0];
+	pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+	pcie->mem.name = "PCIe MEM space";
+	pcie->mem.flags = IORESOURCE_MEM;
+	pci_add_resource(&resources, &pcie->mem);
+
+	pcie->map_irq = iproc_pcie_bcma_map_irq;
+
+	ret = iproc_pcie_setup(pcie, &resources);
+	if (ret) {
+		dev_err(dev, "PCIe controller setup failed\n");
+		pci_free_resource_list(&resources);
+		return ret;
+	}
+
+	bcma_set_drvdata(bdev, pcie);
+	return 0;
+}
+
+static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+{
+	struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
+
+	iproc_pcie_remove(pcie);
+}
+
+static const struct bcma_device_id iproc_pcie_bcma_table[] = {
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	{},
+};
+MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table);
+
+static struct bcma_driver iproc_pcie_bcma_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= iproc_pcie_bcma_table,
+	.probe		= iproc_pcie_bcma_probe,
+	.remove		= iproc_pcie_bcma_remove,
+};
+
+static int __init iproc_pcie_bcma_init(void)
+{
+	return bcma_driver_register(&iproc_pcie_bcma_driver);
+}
+module_init(iproc_pcie_bcma_init);
+
+static void __exit iproc_pcie_bcma_exit(void)
+{
+	bcma_driver_unregister(&iproc_pcie_bcma_driver);
+}
+module_exit(iproc_pcie_bcma_exit);
+
+MODULE_AUTHOR("Hauke Mehrtens");
+MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-iproc-msi.c b/marvell/linux/drivers/pci/controller/pcie-iproc-msi.c
new file mode 100644
index 0000000..0cb2fa1
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-iproc-msi.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+
+#include "pcie-iproc.h"
+
+#define IPROC_MSI_INTR_EN_SHIFT        11
+#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
+#define IPROC_MSI_INT_N_EVENT_SHIFT    1
+#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
+#define IPROC_MSI_EQ_EN_SHIFT          0
+#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)
+
+#define IPROC_MSI_EQ_MASK              0x3f
+
+/* Max number of GIC interrupts */
+#define NR_HW_IRQS                     6
+
+/* Number of entries in each event queue */
+#define EQ_LEN                         64
+
+/* Size of each event queue memory region */
+#define EQ_MEM_REGION_SIZE             SZ_4K
+
+/* Size of each MSI address region */
+#define MSI_MEM_REGION_SIZE            SZ_4K
+
+enum iproc_msi_reg {
+	IPROC_MSI_EQ_PAGE = 0,
+	IPROC_MSI_EQ_PAGE_UPPER,
+	IPROC_MSI_PAGE,
+	IPROC_MSI_PAGE_UPPER,
+	IPROC_MSI_CTRL,
+	IPROC_MSI_EQ_HEAD,
+	IPROC_MSI_EQ_TAIL,
+	IPROC_MSI_INTS_EN,
+	IPROC_MSI_REG_SIZE,
+};
+
+struct iproc_msi;
+
+/**
+ * struct iproc_msi_grp - iProc MSI group
+ *
+ * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
+ * event queue.
+ *
+ * @msi: pointer to iProc MSI data
+ * @gic_irq: GIC interrupt
+ * @eq: Event queue number
+ */
+struct iproc_msi_grp {
+	struct iproc_msi *msi;
+	int gic_irq;
+	unsigned int eq;
+};
+
+/**
+ * struct iproc_msi - iProc event queue based MSI
+ *
+ * Only meant to be used on platforms without MSI support integrated into the
+ * GIC.
+ *
+ * @pcie: pointer to iProc PCIe data
+ * @reg_offsets: MSI register offsets
+ * @grps: MSI groups
+ * @nr_irqs: number of total interrupts connected to GIC
+ * @nr_cpus: number of total CPUs
+ * @has_inten_reg: indicates the MSI interrupt enable register needs to be
+ * set explicitly (required for some legacy platforms)
+ * @bitmap: MSI vector bitmap
+ * @bitmap_lock: lock to protect access to the MSI bitmap
+ * @nr_msi_vecs: total number of MSI vectors
+ * @inner_domain: inner IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @nr_eq_region: required number of 4K aligned memory region for MSI event
+ * queues
+ * @nr_msi_region: required number of 4K aligned address region for MSI posted
+ * writes
+ * @eq_cpu: pointer to allocated memory region for MSI event queues
+ * @eq_dma: DMA address of MSI event queues
+ * @msi_addr: MSI address
+ */
+struct iproc_msi {
+	struct iproc_pcie *pcie;
+	const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
+	struct iproc_msi_grp *grps;
+	int nr_irqs;
+	int nr_cpus;
+	bool has_inten_reg;
+	unsigned long *bitmap;
+	struct mutex bitmap_lock;
+	unsigned int nr_msi_vecs;
+	struct irq_domain *inner_domain;
+	struct irq_domain *msi_domain;
+	unsigned int nr_eq_region;
+	unsigned int nr_msi_region;
+	void *eq_cpu;
+	dma_addr_t eq_dma;
+	phys_addr_t msi_addr;
+};
+
+static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
+};
+
+static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
+	{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
+	{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
+	{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
+	{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
+};
+
+static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
+				     enum iproc_msi_reg reg,
+				     unsigned int eq)
+{
+	struct iproc_pcie *pcie = msi->pcie;
+
+	return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
+}
+
+static inline void iproc_msi_write_reg(struct iproc_msi *msi,
+				       enum iproc_msi_reg reg,
+				       int eq, u32 val)
+{
+	struct iproc_pcie *pcie = msi->pcie;
+
+	writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
+}
+
+static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
+{
+	return (hwirq % msi->nr_irqs);
+}
+
+static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
+						 unsigned long hwirq)
+{
+	if (msi->nr_msi_region > 1)
+		return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
+	else
+		return hwirq_to_group(msi, hwirq) * sizeof(u32);
+}
+
+static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
+{
+	if (msi->nr_eq_region > 1)
+		return eq * EQ_MEM_REGION_SIZE;
+	else
+		return eq * EQ_LEN * sizeof(u32);
+}
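+
+/*
+ * Example: PAXB exposes a single event-queue region (nr_eq_region = 1),
+ * so queue 1 starts at 1 * EQ_LEN * sizeof(u32) = 256 bytes into it,
+ * whereas PAXC allocates one 4 KiB region per queue, putting queue 1 at
+ * offset 4096.
+ */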
+
+static struct irq_chip iproc_msi_irq_chip = {
+	.name = "iProc-MSI",
+};
+
+static struct msi_domain_info iproc_msi_domain_info = {
+	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		MSI_FLAG_PCI_MSIX,
+	.chip = &iproc_msi_irq_chip,
+};
+
+/*
+ * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
+ * dedicated event queue.  Each MSI group can support up to 64 MSI vectors.
+ *
+ * The number of MSI groups varies between different iProc SoCs.  The total
+ * number of CPU cores also varies.  To support MSI IRQ affinity, we
+ * distribute GIC interrupts across all available CPUs.  MSI vector is moved
+ * from one GIC interrupt to another to steer to the target CPU.
+ *
+ * Assuming:
+ * - the number of MSI groups is M
+ * - the number of CPU cores is N
+ * - M is always a multiple of N
+ *
+ * Total number of raw MSI vectors = M * 64
+ * Total number of supported MSI vectors = (M * 64) / N
+ */
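+/*
+ * Worked example: with M = 6 MSI groups and N = 2 CPUs there are
+ * 6 * 64 = 384 raw vectors but only 384 / 2 = 192 allocatable ones,
+ * since each allocated vector reserves a sibling on every CPU so the
+ * MSI can be steered for IRQ affinity.
+ */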
+static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
+{
+	return (hwirq % msi->nr_cpus);
+}
+
+static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
+						     unsigned long hwirq)
+{
+	return (hwirq - hwirq_to_cpu(msi, hwirq));
+}
+
+static int iproc_msi_irq_set_affinity(struct irq_data *data,
+				      const struct cpumask *mask, bool force)
+{
+	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+	int target_cpu = cpumask_first(mask);
+	int curr_cpu;
+	int ret;
+
+	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
+	if (curr_cpu == target_cpu)
+		ret = IRQ_SET_MASK_OK_DONE;
+	else {
+		/* steer MSI to the target CPU */
+		data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+		ret = IRQ_SET_MASK_OK;
+	}
+
+	irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
+
+	return ret;
+}
+
+static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
+					  struct msi_msg *msg)
+{
+	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+	dma_addr_t addr;
+
+	addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
+	msg->address_lo = lower_32_bits(addr);
+	msg->address_hi = upper_32_bits(addr);
+	msg->data = data->hwirq << 5;
+}
+
+static struct irq_chip iproc_msi_bottom_irq_chip = {
+	.name = "MSI",
+	.irq_set_affinity = iproc_msi_irq_set_affinity,
+	.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
+};
+
+static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
+				      unsigned int virq, unsigned int nr_irqs,
+				      void *args)
+{
+	struct iproc_msi *msi = domain->host_data;
+	int hwirq, i;
+
+	if (msi->nr_cpus > 1 && nr_irqs > 1)
+		return -EINVAL;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	/*
+	 * Allocate 'nr_irqs' multiplied by 'nr_cpus' number of MSI vectors
+	 * each time
+	 */
+	hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
+					order_base_2(msi->nr_cpus * nr_irqs));
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	if (hwirq < 0)
+		return -ENOSPC;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &iproc_msi_bottom_irq_chip,
+				    domain->host_data, handle_simple_irq,
+				    NULL, NULL);
+	}
+
+	return 0;
+}
+
+static void iproc_msi_irq_domain_free(struct irq_domain *domain,
+				      unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+	unsigned int hwirq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
+	bitmap_release_region(msi->bitmap, hwirq,
+			      order_base_2(msi->nr_cpus * nr_irqs));
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc = iproc_msi_irq_domain_alloc,
+	.free = iproc_msi_irq_domain_free,
+};
+
+static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
+{
+	u32 *msg, hwirq;
+	unsigned int offs;
+
+	offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
+	msg = (u32 *)(msi->eq_cpu + offs);
+	hwirq = readl(msg);
+	hwirq = (hwirq >> 5) + (hwirq & 0x1f);
+
+	/*
+	 * Since we have multiple hwirq mapped to a single MSI vector,
+	 * now we need to derive the hwirq at CPU0.  It can then be used to
+	 * map back to the virq.
+	 */
+	return hwirq_to_canonical_hwirq(msi, hwirq);
+}
+
+static void iproc_msi_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct iproc_msi_grp *grp;
+	struct iproc_msi *msi;
+	u32 eq, head, tail, nr_events;
+	unsigned long hwirq;
+	int virq;
+
+	chained_irq_enter(chip, desc);
+
+	grp = irq_desc_get_handler_data(desc);
+	msi = grp->msi;
+	eq = grp->eq;
+
+	/*
+	 * iProc MSI event queue is tracked by head and tail pointers.  Head
+	 * pointer indicates the next entry (MSI data) to be consumed by SW in
+	 * the queue and needs to be updated by SW.  iProc MSI core uses the
+	 * tail pointer as the next data insertion point.
+	 *
+	 * Entries between head and tail pointers contain valid MSI data.  MSI
+	 * data is guaranteed to be in the event queue memory before the tail
+	 * pointer is updated by the iProc MSI core.
+	 */
+	head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
+				  eq) & IPROC_MSI_EQ_MASK;
+	do {
+		tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
+					  eq) & IPROC_MSI_EQ_MASK;
+
+		/*
+		 * Figure out total number of events (MSI data) to be
+		 * processed.
+		 */
+		nr_events = (tail < head) ?
+			(EQ_LEN - (head - tail)) : (tail - head);
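+		/*
+		 * Example: with EQ_LEN = 64, head = 62 and tail = 2 the
+		 * queue has wrapped, giving 64 - (62 - 2) = 4 new events.
+		 */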
+		if (!nr_events)
+			break;
+
+		/* process all outstanding events */
+		while (nr_events--) {
+			hwirq = decode_msi_hwirq(msi, eq, head);
+			virq = irq_find_mapping(msi->inner_domain, hwirq);
+			generic_handle_irq(virq);
+
+			head++;
+			head %= EQ_LEN;
+		}
+
+		/*
+		 * Now all outstanding events have been processed.  Update the
+		 * head pointer.
+		 */
+		iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);
+
+		/*
+		 * Now go read the tail pointer again to see if there are new
+		 * outstanding events that came in during the above window.
+		 */
+	} while (true);
+
+	chained_irq_exit(chip, desc);
+}
+
+static void iproc_msi_enable(struct iproc_msi *msi)
+{
+	int i, eq;
+	u32 val;
+
+	/* Program memory region for each event queue */
+	for (i = 0; i < msi->nr_eq_region; i++) {
+		dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);
+
+		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
+				    lower_32_bits(addr));
+		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
+				    upper_32_bits(addr));
+	}
+
+	/* Program address region for MSI posted writes */
+	for (i = 0; i < msi->nr_msi_region; i++) {
+		phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);
+
+		iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
+				    lower_32_bits(addr));
+		iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
+				    upper_32_bits(addr));
+	}
+
+	for (eq = 0; eq < msi->nr_irqs; eq++) {
+		/* Enable MSI event queue */
+		val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
+			IPROC_MSI_EQ_EN;
+		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
+
+		/*
+		 * Some legacy platforms require the MSI interrupt enable
+		 * register to be set explicitly.
+		 */
+		if (msi->has_inten_reg) {
+			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
+			val |= BIT(eq);
+			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
+		}
+	}
+}
+
+static void iproc_msi_disable(struct iproc_msi *msi)
+{
+	u32 eq, val;
+
+	for (eq = 0; eq < msi->nr_irqs; eq++) {
+		if (msi->has_inten_reg) {
+			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
+			val &= ~BIT(eq);
+			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
+		}
+
+		val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
+		val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
+			 IPROC_MSI_EQ_EN);
+		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
+	}
+}
+
+static int iproc_msi_alloc_domains(struct device_node *node,
+				   struct iproc_msi *msi)
+{
+	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
+						  &msi_domain_ops, msi);
+	if (!msi->inner_domain)
+		return -ENOMEM;
+
+	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+						    &iproc_msi_domain_info,
+						    msi->inner_domain);
+	if (!msi->msi_domain) {
+		irq_domain_remove(msi->inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void iproc_msi_free_domains(struct iproc_msi *msi)
+{
+	if (msi->msi_domain)
+		irq_domain_remove(msi->msi_domain);
+
+	if (msi->inner_domain)
+		irq_domain_remove(msi->inner_domain);
+}
+
+static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
+{
+	int i;
+
+	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
+		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
+						 NULL, NULL);
+	}
+}
+
+static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
+{
+	int i, ret;
+	cpumask_var_t mask;
+	struct iproc_pcie *pcie = msi->pcie;
+
+	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
+		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
+						 iproc_msi_handler,
+						 &msi->grps[i]);
+		/* Dedicate GIC interrupt to each CPU core */
+		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+			cpumask_clear(mask);
+			cpumask_set_cpu(cpu, mask);
+			ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
+			if (ret)
+				dev_err(pcie->dev,
+					"failed to set affinity for IRQ%d\n",
+					msi->grps[i].gic_irq);
+			free_cpumask_var(mask);
+		} else {
+			dev_err(pcie->dev, "failed to alloc CPU mask\n");
+			ret = -EINVAL;
+		}
+
+		if (ret) {
+			/* Free all configured/unconfigured IRQs */
+			iproc_msi_irq_free(msi, cpu);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
+{
+	struct iproc_msi *msi;
+	int i, ret;
+	unsigned int cpu;
+
+	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
+		return -ENODEV;
+
+	if (!of_find_property(node, "msi-controller", NULL))
+		return -ENODEV;
+
+	if (pcie->msi)
+		return -EBUSY;
+
+	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
+	if (!msi)
+		return -ENOMEM;
+
+	msi->pcie = pcie;
+	pcie->msi = msi;
+	msi->msi_addr = pcie->base_addr;
+	mutex_init(&msi->bitmap_lock);
+	msi->nr_cpus = num_possible_cpus();
+
+	if (msi->nr_cpus == 1)
+		iproc_msi_domain_info.flags |=  MSI_FLAG_MULTI_PCI_MSI;
+
+	msi->nr_irqs = of_irq_count(node);
+	if (!msi->nr_irqs) {
+		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
+		return -ENODEV;
+	}
+
+	if (msi->nr_irqs > NR_HW_IRQS) {
+		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
+			 msi->nr_irqs);
+		msi->nr_irqs = NR_HW_IRQS;
+	}
+
+	if (msi->nr_irqs < msi->nr_cpus) {
+		dev_err(pcie->dev,
+			"not enough GIC interrupts for MSI affinity\n");
+		return -EINVAL;
+	}
+
+	if (msi->nr_irqs % msi->nr_cpus != 0) {
+		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
+		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
+			 msi->nr_irqs);
+	}
+
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXB_BCMA:
+	case IPROC_PCIE_PAXB:
+		msi->reg_offsets = iproc_msi_reg_paxb;
+		msi->nr_eq_region = 1;
+		msi->nr_msi_region = 1;
+		break;
+	case IPROC_PCIE_PAXC:
+		msi->reg_offsets = iproc_msi_reg_paxc;
+		msi->nr_eq_region = msi->nr_irqs;
+		msi->nr_msi_region = msi->nr_irqs;
+		break;
+	default:
+		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+		return -EINVAL;
+	}
+
+	if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
+		msi->has_inten_reg = true;
+
+	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
+	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
+				   sizeof(*msi->bitmap), GFP_KERNEL);
+	if (!msi->bitmap)
+		return -ENOMEM;
+
+	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
+				 GFP_KERNEL);
+	if (!msi->grps)
+		return -ENOMEM;
+
+	for (i = 0; i < msi->nr_irqs; i++) {
+		unsigned int irq = irq_of_parse_and_map(node, i);
+
+		if (!irq) {
+			dev_err(pcie->dev, "unable to parse/map interrupt\n");
+			ret = -ENODEV;
+			goto free_irqs;
+		}
+		msi->grps[i].gic_irq = irq;
+		msi->grps[i].msi = msi;
+		msi->grps[i].eq = i;
+	}
+
+	/* Reserve memory for event queue and make sure memories are zeroed */
+	msi->eq_cpu = dma_alloc_coherent(pcie->dev,
+					 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+					 &msi->eq_dma, GFP_KERNEL);
+	if (!msi->eq_cpu) {
+		ret = -ENOMEM;
+		goto free_irqs;
+	}
+
+	ret = iproc_msi_alloc_domains(node, msi);
+	if (ret) {
+		dev_err(pcie->dev, "failed to create MSI domains\n");
+		goto free_eq_dma;
+	}
+
+	for_each_online_cpu(cpu) {
+		ret = iproc_msi_irq_setup(msi, cpu);
+		if (ret)
+			goto free_msi_irq;
+	}
+
+	iproc_msi_enable(msi);
+
+	return 0;
+
+free_msi_irq:
+	for_each_online_cpu(cpu)
+		iproc_msi_irq_free(msi, cpu);
+	iproc_msi_free_domains(msi);
+
+free_eq_dma:
+	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+			  msi->eq_cpu, msi->eq_dma);
+
+free_irqs:
+	for (i = 0; i < msi->nr_irqs; i++) {
+		if (msi->grps[i].gic_irq)
+			irq_dispose_mapping(msi->grps[i].gic_irq);
+	}
+	pcie->msi = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(iproc_msi_init);
+
+void iproc_msi_exit(struct iproc_pcie *pcie)
+{
+	struct iproc_msi *msi = pcie->msi;
+	unsigned int i, cpu;
+
+	if (!msi)
+		return;
+
+	iproc_msi_disable(msi);
+
+	for_each_online_cpu(cpu)
+		iproc_msi_irq_free(msi, cpu);
+
+	iproc_msi_free_domains(msi);
+
+	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+			  msi->eq_cpu, msi->eq_dma);
+
+	for (i = 0; i < msi->nr_irqs; i++) {
+		if (msi->grps[i].gic_irq)
+			irq_dispose_mapping(msi->grps[i].gic_irq);
+	}
+}
+EXPORT_SYMBOL(iproc_msi_exit);
diff --git a/marvell/linux/drivers/pci/controller/pcie-iproc-platform.c b/marvell/linux/drivers/pci/controller/pcie-iproc-platform.c
new file mode 100644
index 0000000..9ee6200
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-iproc-platform.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+
+#include "../pci.h"
+#include "pcie-iproc.h"
+
+static const struct of_device_id iproc_pcie_of_match_table[] = {
+	{
+		.compatible = "brcm,iproc-pcie",
+		.data = (int *)IPROC_PCIE_PAXB,
+	}, {
+		.compatible = "brcm,iproc-pcie-paxb-v2",
+		.data = (int *)IPROC_PCIE_PAXB_V2,
+	}, {
+		.compatible = "brcm,iproc-pcie-paxc",
+		.data = (int *)IPROC_PCIE_PAXC,
+	}, {
+		.compatible = "brcm,iproc-pcie-paxc-v2",
+		.data = (int *)IPROC_PCIE_PAXC_V2,
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
+
+static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct iproc_pcie *pcie;
+	struct device_node *np = dev->of_node;
+	struct resource reg;
+	resource_size_t iobase = 0;
+	LIST_HEAD(resources);
+	struct pci_host_bridge *bridge;
+	int ret;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+
+	pcie->dev = dev;
+	pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
+
+	ret = of_address_to_resource(np, 0, &reg);
+	if (ret < 0) {
+		dev_err(dev, "unable to obtain controller resources\n");
+		return ret;
+	}
+
+	pcie->base = devm_pci_remap_cfgspace(dev, reg.start,
+					     resource_size(&reg));
+	if (!pcie->base) {
+		dev_err(dev, "unable to map controller registers\n");
+		return -ENOMEM;
+	}
+	pcie->base_addr = reg.start;
+
+	if (of_property_read_bool(np, "brcm,pcie-ob")) {
+		u32 val;
+
+		ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
+					   &val);
+		if (ret) {
+			dev_err(dev,
+				"missing brcm,pcie-ob-axi-offset property\n");
+			return ret;
+		}
+		pcie->ob.axi_offset = val;
+		pcie->need_ob_cfg = true;
+	}
+
+	/*
+	 * DT nodes are not used by all platforms that use the iProc PCIe
+	 * core driver. For platforms that require explicit inbound mapping
+	 * configuration, "dma-ranges" would have been present in DT
+	 */
+	pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
+
+	/* PHY use is optional */
+	pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+	if (IS_ERR(pcie->phy))
+		return PTR_ERR(pcie->phy);
+
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
+						    &iobase);
+	if (ret) {
+		dev_err(dev, "unable to get PCI host bridge resources\n");
+		return ret;
+	}
+
+	/* PAXC doesn't support legacy IRQs, skip mapping */
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXC:
+	case IPROC_PCIE_PAXC_V2:
+		break;
+	default:
+		pcie->map_irq = of_irq_parse_and_map_pci;
+	}
+
+	ret = iproc_pcie_setup(pcie, &resources);
+	if (ret) {
+		dev_err(dev, "PCIe controller setup failed\n");
+		pci_free_resource_list(&resources);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, pcie);
+	return 0;
+}
+
+static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
+{
+	struct iproc_pcie *pcie = platform_get_drvdata(pdev);
+
+	return iproc_pcie_remove(pcie);
+}
+
+static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev)
+{
+	struct iproc_pcie *pcie = platform_get_drvdata(pdev);
+
+	iproc_pcie_shutdown(pcie);
+}
+
+static struct platform_driver iproc_pcie_pltfm_driver = {
+	.driver = {
+		.name = "iproc-pcie",
+		.of_match_table = of_match_ptr(iproc_pcie_of_match_table),
+	},
+	.probe = iproc_pcie_pltfm_probe,
+	.remove = iproc_pcie_pltfm_remove,
+	.shutdown = iproc_pcie_pltfm_shutdown,
+};
+module_platform_driver(iproc_pcie_pltfm_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-iproc.c b/marvell/linux/drivers/pci/controller/pcie-iproc.c
new file mode 100644
index 0000000..c6b1c18
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-iproc.c
@@ -0,0 +1,1637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2015 Broadcom Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+
+#include "pcie-iproc.h"
+
+#define EP_PERST_SOURCE_SELECT_SHIFT	2
+#define EP_PERST_SOURCE_SELECT		BIT(EP_PERST_SOURCE_SELECT_SHIFT)
+#define EP_MODE_SURVIVE_PERST_SHIFT	1
+#define EP_MODE_SURVIVE_PERST		BIT(EP_MODE_SURVIVE_PERST_SHIFT)
+#define RC_PCIE_RST_OUTPUT_SHIFT	0
+#define RC_PCIE_RST_OUTPUT		BIT(RC_PCIE_RST_OUTPUT_SHIFT)
+#define PAXC_RESET_MASK			0x7f
+
+#define GIC_V3_CFG_SHIFT		0
+#define GIC_V3_CFG			BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT		0
+#define MSI_ENABLE_CFG			BIT(MSI_ENABLE_CFG_SHIFT)
+
+#define CFG_IND_ADDR_MASK		0x00001ffc
+
+#define CFG_ADDR_BUS_NUM_SHIFT		20
+#define CFG_ADDR_BUS_NUM_MASK		0x0ff00000
+#define CFG_ADDR_DEV_NUM_SHIFT		15
+#define CFG_ADDR_DEV_NUM_MASK		0x000f8000
+#define CFG_ADDR_FUNC_NUM_SHIFT		12
+#define CFG_ADDR_FUNC_NUM_MASK		0x00007000
+#define CFG_ADDR_REG_NUM_SHIFT		2
+#define CFG_ADDR_REG_NUM_MASK		0x00000ffc
+#define CFG_ADDR_CFG_TYPE_SHIFT		0
+#define CFG_ADDR_CFG_TYPE_MASK		0x00000003
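+
+/*
+ * Worked example (added for illustration, not part of the original code):
+ * a type 1 config access to bus 1, device 0, function 0, register 0 is
+ * composed from the fields above as
+ *
+ *   (1 << CFG_ADDR_BUS_NUM_SHIFT) | (0 << CFG_ADDR_DEV_NUM_SHIFT) |
+ *   (0 << CFG_ADDR_FUNC_NUM_SHIFT) | (0 & CFG_ADDR_REG_NUM_MASK) |
+ *   (1 & CFG_ADDR_CFG_TYPE_MASK) == 0x00100001
+ */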
+
+#define SYS_RC_INTX_MASK		0xf
+
+#define PCIE_PHYLINKUP_SHIFT		3
+#define PCIE_PHYLINKUP			BIT(PCIE_PHYLINKUP_SHIFT)
+#define PCIE_DL_ACTIVE_SHIFT		2
+#define PCIE_DL_ACTIVE			BIT(PCIE_DL_ACTIVE_SHIFT)
+
+#define APB_ERR_EN_SHIFT		0
+#define APB_ERR_EN			BIT(APB_ERR_EN_SHIFT)
+
+#define CFG_RD_SUCCESS			0
+#define CFG_RD_UR			1
+#define CFG_RD_CRS			2
+#define CFG_RD_CA			3
+#define CFG_RETRY_STATUS		0xffff0001
+#define CFG_RETRY_STATUS_TIMEOUT_US	500000 /* 500 milliseconds */
+
+/* derive the enum index of the outbound/inbound mapping registers */
+#define MAP_REG(base_reg, index)	((base_reg) + (index) * 2)
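+
+/*
+ * Example (for illustration): the OARR/OMAP enum entries below are
+ * interleaved in pairs, so MAP_REG(IPROC_PCIE_OARR0, 1) evaluates to
+ * IPROC_PCIE_OARR0 + 2, i.e. IPROC_PCIE_OARR1.
+ */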
+
+/*
+ * Maximum number of outbound mapping window sizes that can be supported by any
+ * OARR/OMAP mapping pair
+ */
+#define MAX_NUM_OB_WINDOW_SIZES		4
+
+#define OARR_VALID_SHIFT		0
+#define OARR_VALID			BIT(OARR_VALID_SHIFT)
+#define OARR_SIZE_CFG_SHIFT		1
+
+/*
+ * Maximum number of inbound mapping region sizes that can be supported by an
+ * IARR
+ */
+#define MAX_NUM_IB_REGION_SIZES		9
+
+#define IMAP_VALID_SHIFT		0
+#define IMAP_VALID			BIT(IMAP_VALID_SHIFT)
+
+#define IPROC_PCI_PM_CAP		0x48
+#define IPROC_PCI_PM_CAP_MASK		0xffff
+#define IPROC_PCI_EXP_CAP		0xac
+
+#define IPROC_PCIE_REG_INVALID		0xffff
+
+/**
+ * iProc PCIe outbound mapping controller-specific parameters
+ *
+ * @window_sizes: list of supported outbound mapping window sizes in MB
+ * @nr_sizes: number of supported outbound mapping window sizes
+ */
+struct iproc_pcie_ob_map {
+	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
+	unsigned int nr_sizes;
+};
+
+static const struct iproc_pcie_ob_map paxb_ob_map[] = {
+	{
+		/* OARR0/OMAP0 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+	{
+		/* OARR1/OMAP1 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+};
+
+static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
+	{
+		/* OARR0/OMAP0 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+	{
+		/* OARR1/OMAP1 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+	{
+		/* OARR2/OMAP2 */
+		.window_sizes = { 128, 256, 512, 1024 },
+		.nr_sizes = 4,
+	},
+	{
+		/* OARR3/OMAP3 */
+		.window_sizes = { 128, 256, 512, 1024 },
+		.nr_sizes = 4,
+	},
+};
+
+/**
+ * iProc PCIe inbound mapping type
+ */
+enum iproc_pcie_ib_map_type {
+	/* for DDR memory */
+	IPROC_PCIE_IB_MAP_MEM = 0,
+
+	/* for device I/O memory */
+	IPROC_PCIE_IB_MAP_IO,
+
+	/* invalid or unused */
+	IPROC_PCIE_IB_MAP_INVALID
+};
+
+/**
+ * iProc PCIe inbound mapping controller-specific parameters
+ *
+ * @type: inbound mapping region type
+ * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
+ * SZ_1G
+ * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
+ * GB, depending on the size unit
+ * @nr_sizes: number of supported inbound mapping region sizes
+ * @nr_windows: number of supported inbound mapping windows for the region
+ * @imap_addr_offset: register offset between the upper and lower 32-bit
+ * IMAP address registers
+ * @imap_window_offset: register offset between each IMAP window
+ */
+struct iproc_pcie_ib_map {
+	enum iproc_pcie_ib_map_type type;
+	unsigned int size_unit;
+	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
+	unsigned int nr_sizes;
+	unsigned int nr_windows;
+	u16 imap_addr_offset;
+	u16 imap_window_offset;
+};
+
+static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
+	{
+		/* IARR0/IMAP0 */
+		.type = IPROC_PCIE_IB_MAP_IO,
+		.size_unit = SZ_1K,
+		.region_sizes = { 32 },
+		.nr_sizes = 1,
+		.nr_windows = 8,
+		.imap_addr_offset = 0x40,
+		.imap_window_offset = 0x4,
+	},
+	{
+		/* IARR1/IMAP1 (currently unused) */
+		.type = IPROC_PCIE_IB_MAP_INVALID,
+	},
+	{
+		/* IARR2/IMAP2 */
+		.type = IPROC_PCIE_IB_MAP_MEM,
+		.size_unit = SZ_1M,
+		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
+				  16384 },
+		.nr_sizes = 9,
+		.nr_windows = 1,
+		.imap_addr_offset = 0x4,
+		.imap_window_offset = 0x8,
+	},
+	{
+		/* IARR3/IMAP3 */
+		.type = IPROC_PCIE_IB_MAP_MEM,
+		.size_unit = SZ_1G,
+		.region_sizes = { 1, 2, 4, 8, 16, 32 },
+		.nr_sizes = 6,
+		.nr_windows = 8,
+		.imap_addr_offset = 0x4,
+		.imap_window_offset = 0x8,
+	},
+	{
+		/* IARR4/IMAP4 */
+		.type = IPROC_PCIE_IB_MAP_MEM,
+		.size_unit = SZ_1G,
+		.region_sizes = { 32, 64, 128, 256, 512 },
+		.nr_sizes = 5,
+		.nr_windows = 8,
+		.imap_addr_offset = 0x4,
+		.imap_window_offset = 0x8,
+	},
+};
+
+/*
+ * iProc PCIe host registers
+ */
+enum iproc_pcie_reg {
+	/* clock/reset signal control */
+	IPROC_PCIE_CLK_CTRL = 0,
+
+	/*
+	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
+	 * GICv3 ITS)
+	 */
+	IPROC_PCIE_MSI_GIC_MODE,
+
+	/*
+	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
+	 * window where the MSI posted writes are written, for the writes to be
+	 * interpreted as MSI writes.
+	 */
+	IPROC_PCIE_MSI_BASE_ADDR,
+	IPROC_PCIE_MSI_WINDOW_SIZE,
+
+	/*
+	 * To hold the address of the register to which the MSI writes are
+	 * sent.  When ARM GICv3 ITS is used, this should be programmed
+	 * with the address of the GITS_TRANSLATER register.
+	 */
+	IPROC_PCIE_MSI_ADDR_LO,
+	IPROC_PCIE_MSI_ADDR_HI,
+
+	/* enable MSI */
+	IPROC_PCIE_MSI_EN_CFG,
+
+	/* allow access to root complex configuration space */
+	IPROC_PCIE_CFG_IND_ADDR,
+	IPROC_PCIE_CFG_IND_DATA,
+
+	/* allow access to device configuration space */
+	IPROC_PCIE_CFG_ADDR,
+	IPROC_PCIE_CFG_DATA,
+
+	/* enable INTx */
+	IPROC_PCIE_INTX_EN,
+
+	/* outbound address mapping */
+	IPROC_PCIE_OARR0,
+	IPROC_PCIE_OMAP0,
+	IPROC_PCIE_OARR1,
+	IPROC_PCIE_OMAP1,
+	IPROC_PCIE_OARR2,
+	IPROC_PCIE_OMAP2,
+	IPROC_PCIE_OARR3,
+	IPROC_PCIE_OMAP3,
+
+	/* inbound address mapping */
+	IPROC_PCIE_IARR0,
+	IPROC_PCIE_IMAP0,
+	IPROC_PCIE_IARR1,
+	IPROC_PCIE_IMAP1,
+	IPROC_PCIE_IARR2,
+	IPROC_PCIE_IMAP2,
+	IPROC_PCIE_IARR3,
+	IPROC_PCIE_IMAP3,
+	IPROC_PCIE_IARR4,
+	IPROC_PCIE_IMAP4,
+
+	/* config read status */
+	IPROC_PCIE_CFG_RD_STATUS,
+
+	/* link status */
+	IPROC_PCIE_LINK_STATUS,
+
+	/* enable APB error for unsupported requests */
+	IPROC_PCIE_APB_ERR_EN,
+
+	/* total number of core registers */
+	IPROC_PCIE_MAX_NUM_REG,
+};
+
+/* iProc PCIe PAXB BCMA registers */
+static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
+	[IPROC_PCIE_CLK_CTRL]		= 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
+	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
+	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
+	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
+	[IPROC_PCIE_INTX_EN]		= 0x330,
+	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
+};
+
+/* iProc PCIe PAXB registers */
+static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
+	[IPROC_PCIE_CLK_CTRL]		= 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
+	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
+	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
+	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
+	[IPROC_PCIE_INTX_EN]		= 0x330,
+	[IPROC_PCIE_OARR0]		= 0xd20,
+	[IPROC_PCIE_OMAP0]		= 0xd40,
+	[IPROC_PCIE_OARR1]		= 0xd28,
+	[IPROC_PCIE_OMAP1]		= 0xd48,
+	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
+	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
+};
+
+/* iProc PCIe PAXB v2 registers */
+static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
+	[IPROC_PCIE_CLK_CTRL]		= 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]	= 0x120,
+	[IPROC_PCIE_CFG_IND_DATA]	= 0x124,
+	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
+	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
+	[IPROC_PCIE_INTX_EN]		= 0x330,
+	[IPROC_PCIE_OARR0]		= 0xd20,
+	[IPROC_PCIE_OMAP0]		= 0xd40,
+	[IPROC_PCIE_OARR1]		= 0xd28,
+	[IPROC_PCIE_OMAP1]		= 0xd48,
+	[IPROC_PCIE_OARR2]		= 0xd60,
+	[IPROC_PCIE_OMAP2]		= 0xd68,
+	[IPROC_PCIE_OARR3]		= 0xdf0,
+	[IPROC_PCIE_OMAP3]		= 0xdf8,
+	[IPROC_PCIE_IARR0]		= 0xd00,
+	[IPROC_PCIE_IMAP0]		= 0xc00,
+	[IPROC_PCIE_IARR2]		= 0xd10,
+	[IPROC_PCIE_IMAP2]		= 0xcc0,
+	[IPROC_PCIE_IARR3]		= 0xe00,
+	[IPROC_PCIE_IMAP3]		= 0xe08,
+	[IPROC_PCIE_IARR4]		= 0xe68,
+	[IPROC_PCIE_IMAP4]		= 0xe70,
+	[IPROC_PCIE_CFG_RD_STATUS]	= 0xee0,
+	[IPROC_PCIE_LINK_STATUS]	= 0xf0c,
+	[IPROC_PCIE_APB_ERR_EN]		= 0xf40,
+};
+
+/* iProc PCIe PAXC v1 registers */
+static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
+	[IPROC_PCIE_CLK_CTRL]		= 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
+	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
+	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
+	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
+};
+
+/* iProc PCIe PAXC v2 registers */
+static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
+	[IPROC_PCIE_MSI_GIC_MODE]	= 0x050,
+	[IPROC_PCIE_MSI_BASE_ADDR]	= 0x074,
+	[IPROC_PCIE_MSI_WINDOW_SIZE]	= 0x078,
+	[IPROC_PCIE_MSI_ADDR_LO]	= 0x07c,
+	[IPROC_PCIE_MSI_ADDR_HI]	= 0x080,
+	[IPROC_PCIE_MSI_EN_CFG]		= 0x09c,
+	[IPROC_PCIE_CFG_IND_ADDR]	= 0x1f0,
+	[IPROC_PCIE_CFG_IND_DATA]	= 0x1f4,
+	[IPROC_PCIE_CFG_ADDR]		= 0x1f8,
+	[IPROC_PCIE_CFG_DATA]		= 0x1fc,
+};
+
+/*
+ * List of device IDs of controllers that have a corrupted capability list
+ * and require a SW fixup
+ */
+static const u16 iproc_pcie_corrupt_cap_did[] = {
+	0x16cd,
+	0x16f0,
+	0xd802,
+	0xd804
+};
+
+static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
+{
+	struct iproc_pcie *pcie = bus->sysdata;
+	return pcie;
+}
+
+static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
+{
+	return !!(reg_offset == IPROC_PCIE_REG_INVALID);
+}
+
+static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
+					enum iproc_pcie_reg reg)
+{
+	return pcie->reg_offsets[reg];
+}
+
+static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
+				      enum iproc_pcie_reg reg)
+{
+	u16 offset = iproc_pcie_reg_offset(pcie, reg);
+
+	if (iproc_pcie_reg_is_invalid(offset))
+		return 0;
+
+	return readl(pcie->base + offset);
+}
+
+static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
+					enum iproc_pcie_reg reg, u32 val)
+{
+	u16 offset = iproc_pcie_reg_offset(pcie, reg);
+
+	if (iproc_pcie_reg_is_invalid(offset))
+		return;
+
+	writel(val, pcie->base + offset);
+}
+
+/**
+ * APB error forwarding can be disabled while accessing configuration
+ * registers of the endpoint device, to prevent unsupported requests
+ * (typically seen during enumeration with multi-function devices) from
+ * triggering a system exception.
+ */
+static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
+					      bool disable)
+{
+	struct iproc_pcie *pcie = iproc_data(bus);
+	u32 val;
+
+	if (bus->number && pcie->has_apb_err_disable) {
+		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
+		if (disable)
+			val &= ~APB_ERR_EN;
+		else
+			val |= APB_ERR_EN;
+		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
+	}
+}
+
+static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
+					       unsigned int busno,
+					       unsigned int slot,
+					       unsigned int fn,
+					       int where)
+{
+	u16 offset;
+	u32 val;
+
+	/* EP device access */
+	val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
+		(slot << CFG_ADDR_DEV_NUM_SHIFT) |
+		(fn << CFG_ADDR_FUNC_NUM_SHIFT) |
+		(where & CFG_ADDR_REG_NUM_MASK) |
+		(1 & CFG_ADDR_CFG_TYPE_MASK);
+
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
+	offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
+
+	if (iproc_pcie_reg_is_invalid(offset))
+		return NULL;
+
+	return (pcie->base + offset);
+}
+
+static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
+					 void __iomem *cfg_data_p)
+{
+	int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
+	unsigned int data;
+	u32 status;
+
+	/*
+	 * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+	 * affects config reads of the Vendor ID.  For config writes or any
+	 * other config reads, the Root may automatically reissue the
+	 * configuration request again as a new request.
+	 *
+	 * For config reads, this hardware returns CFG_RETRY_STATUS data
+	 * when it receives a CRS completion, regardless of the address of
+	 * the read or the CRS Software Visibility Enable bit.  As a
+	 * partial workaround for this, we retry in software any read that
+	 * returns CFG_RETRY_STATUS.
+	 *
+	 * Note that a non-Vendor ID config register may have a value of
+	 * CFG_RETRY_STATUS.  If we read that, we can't distinguish it from
+	 * a CRS completion, so we will incorrectly retry the read and
+	 * eventually return the wrong data (0xffffffff).
+	 */
+	data = readl(cfg_data_p);
+	while (data == CFG_RETRY_STATUS && timeout--) {
+		/*
+		 * CRS state is set in CFG_RD status register
+		 * This will handle the case where CFG_RETRY_STATUS is
+		 * valid config data.
+		 */
+		status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
+		if (status != CFG_RD_CRS)
+			return data;
+
+		udelay(1);
+		data = readl(cfg_data_p);
+	}
+
+	if (data == CFG_RETRY_STATUS)
+		data = 0xffffffff;
+
+	return data;
+}
+
+static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
+{
+	u32 i, dev_id;
+
+	switch (where & ~0x3) {
+	case PCI_VENDOR_ID:
+		dev_id = *val >> 16;
+
+		/*
+		 * Activate fixup for those controllers that have corrupted
+		 * capability list registers
+		 */
+		for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
+			if (dev_id == iproc_pcie_corrupt_cap_did[i])
+				pcie->fix_paxc_cap = true;
+		break;
+
+	case IPROC_PCI_PM_CAP:
+		if (pcie->fix_paxc_cap) {
+			/* advertise PM, force next capability to PCIe */
+			*val &= ~IPROC_PCI_PM_CAP_MASK;
+			*val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
+		}
+		break;
+
+	case IPROC_PCI_EXP_CAP:
+		if (pcie->fix_paxc_cap) {
+			/* advertise root port, version 2, terminate here */
+			*val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
+				PCI_CAP_ID_EXP;
+		}
+		break;
+
+	case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
+		/* Don't advertise CRS SV support */
+		*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+				  int where, int size, u32 *val)
+{
+	struct iproc_pcie *pcie = iproc_data(bus);
+	unsigned int slot = PCI_SLOT(devfn);
+	unsigned int fn = PCI_FUNC(devfn);
+	unsigned int busno = bus->number;
+	void __iomem *cfg_data_p;
+	unsigned int data;
+	int ret;
+
+	/* root complex access */
+	if (busno == 0) {
+		ret = pci_generic_config_read32(bus, devfn, where, size, val);
+		if (ret == PCIBIOS_SUCCESSFUL)
+			iproc_pcie_fix_cap(pcie, where, val);
+
+		return ret;
+	}
+
+	cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
+
+	if (!cfg_data_p)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	data = iproc_pcie_cfg_retry(pcie, cfg_data_p);
+
+	*val = data;
+	if (size <= 2)
+		*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+	/*
+	 * For PAXC and PAXCv2, the total number of PFs that one can enumerate
+	 * depends on the firmware configuration. Unfortunately, due to an ASIC
+	 * bug, unconfigured PFs cannot be properly hidden from the root
+	 * complex. As a result, write access to these PFs will cause a bus
+	 * lockup on the embedded processor.
+	 *
+	 * Since all unconfigured PFs are left with an incorrect, stale device
+	 * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
+	 * accesses early here and reject them all.
+	 */
+#define DEVICE_ID_MASK     0xffff0000
+#define DEVICE_ID_SHIFT    16
+	if (pcie->rej_unconfig_pf &&
+	    (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
+		if ((*val & DEVICE_ID_MASK) ==
+		    (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
+			return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * Note that accesses to the configuration registers are protected at the
+ * higher layer by 'pci_lock' in drivers/pci/access.c
+ */
+static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
+					    int busno, unsigned int devfn,
+					    int where)
+{
+	unsigned slot = PCI_SLOT(devfn);
+	unsigned fn = PCI_FUNC(devfn);
+	u16 offset;
+
+	/* root complex access */
+	if (busno == 0) {
+		if (slot > 0 || fn > 0)
+			return NULL;
+
+		iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
+				     where & CFG_IND_ADDR_MASK);
+		offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
+		if (iproc_pcie_reg_is_invalid(offset))
+			return NULL;
+		else
+			return (pcie->base + offset);
+	}
+
+	return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
+}
+
+static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
+						unsigned int devfn,
+						int where)
+{
+	return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
+				      where);
+}
+
+static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
+				       unsigned int devfn, int where,
+				       int size, u32 *val)
+{
+	void __iomem *addr;
+
+	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+	if (!addr) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	*val = readl(addr);
+
+	if (size <= 2)
+		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
+					unsigned int devfn, int where,
+					int size, u32 val)
+{
+	void __iomem *addr;
+	u32 mask, tmp;
+
+	addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+	if (!addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (size == 4) {
+		writel(val, addr);
+		return PCIBIOS_SUCCESSFUL;
+	}
+
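+	/*
+	 * Read-modify-write for sub-word accesses. Illustrative example:
+	 * size = 2 and where = 2 give mask = ~(0xffff << 16), preserving the
+	 * lower half-word while merging val into the upper half-word.
+	 */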
+	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+	tmp = readl(addr) & mask;
+	tmp |= val << ((where & 0x3) * 8);
+	writel(tmp, addr);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 *val)
+{
+	int ret;
+	struct iproc_pcie *pcie = iproc_data(bus);
+
+	iproc_pcie_apb_err_disable(bus, true);
+	if (pcie->iproc_cfg_read)
+		ret = iproc_pcie_config_read(bus, devfn, where, size, val);
+	else
+		ret = pci_generic_config_read32(bus, devfn, where, size, val);
+	iproc_pcie_apb_err_disable(bus, false);
+
+	return ret;
+}
+
+static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
+				     int where, int size, u32 val)
+{
+	int ret;
+
+	iproc_pcie_apb_err_disable(bus, true);
+	ret = pci_generic_config_write32(bus, devfn, where, size, val);
+	iproc_pcie_apb_err_disable(bus, false);
+
+	return ret;
+}
+
+static struct pci_ops iproc_pcie_ops = {
+	.map_bus = iproc_pcie_bus_map_cfg_bus,
+	.read = iproc_pcie_config_read32,
+	.write = iproc_pcie_config_write32,
+};
+
+static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
+{
+	u32 val;
+
+	/*
+	 * PAXC and the internal emulated endpoint device downstream should not
+	 * be reset.  If firmware has been loaded on the endpoint device at an
+	 * earlier boot stage, a reset here causes issues.
+	 */
+	if (pcie->ep_is_internal)
+		return;
+
+	if (assert) {
+		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
+		val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
+			~RC_PCIE_RST_OUTPUT;
+		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
+		udelay(250);
+	} else {
+		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
+		val |= RC_PCIE_RST_OUTPUT;
+		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
+		msleep(100);
+	}
+}
+
+int iproc_pcie_shutdown(struct iproc_pcie *pcie)
+{
+	iproc_pcie_perst_ctrl(pcie, true);
+	msleep(500);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);
+
+static int iproc_pcie_check_link(struct iproc_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	u32 hdr_type, link_ctrl, link_status, class, val;
+	bool link_is_active = false;
+
+	/*
+	 * PAXC connects to emulated endpoint devices directly and does not
+	 * have a Serdes.  Therefore skip the link detection logic here.
+	 */
+	if (pcie->ep_is_internal)
+		return 0;
+
+	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
+	if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
+		dev_err(dev, "PHY or data link is INACTIVE!\n");
+		return -ENODEV;
+	}
+
+	/* make sure we are not in EP mode */
+	iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
+	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
+		dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
+		return -EFAULT;
+	}
+
+	/* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
+#define PCI_BRIDGE_CTRL_REG_OFFSET	0x43c
+#define PCI_CLASS_BRIDGE_MASK		0xffff00
+#define PCI_CLASS_BRIDGE_SHIFT		8
+	iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+				    4, &class);
+	class &= ~PCI_CLASS_BRIDGE_MASK;
+	class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
+	iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+				     4, class);
+
+	/* check link status to see if link is active */
+	iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
+				    2, &link_status);
+	if (link_status & PCI_EXP_LNKSTA_NLW)
+		link_is_active = true;
+
+	if (!link_is_active) {
+		/* try GEN 1 link speed */
+#define PCI_TARGET_LINK_SPEED_MASK	0xf
+#define PCI_TARGET_LINK_SPEED_GEN2	0x2
+#define PCI_TARGET_LINK_SPEED_GEN1	0x1
+		iproc_pci_raw_config_read32(pcie, 0,
+					    IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
+					    4, &link_ctrl);
+		if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
+		    PCI_TARGET_LINK_SPEED_GEN2) {
+			link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
+			link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
+			iproc_pci_raw_config_write32(pcie, 0,
+					IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
+					4, link_ctrl);
+			msleep(100);
+
+			iproc_pci_raw_config_read32(pcie, 0,
+					IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
+					2, &link_status);
+			if (link_status & PCI_EXP_LNKSTA_NLW)
+				link_is_active = true;
+		}
+	}
+
+	dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+
+	return link_is_active ? 0 : -ENODEV;
+}
+
+static void iproc_pcie_enable(struct iproc_pcie *pcie)
+{
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
+}
+
+static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
+					  int window_idx)
+{
+	u32 val;
+
+	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
+
+	return !!(val & OARR_VALID);
+}
+
+static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
+				      int size_idx, u64 axi_addr, u64 pci_addr)
+{
+	struct device *dev = pcie->dev;
+	u16 oarr_offset, omap_offset;
+
+	/*
+	 * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
+	 * on window index.
+	 */
+	oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
+							  window_idx));
+	omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
+							  window_idx));
+	if (iproc_pcie_reg_is_invalid(oarr_offset) ||
+	    iproc_pcie_reg_is_invalid(omap_offset))
+		return -EINVAL;
+
+	/*
+	 * Program the OARR registers.  The upper 32-bit OARR register is
+	 * always right after the lower 32-bit OARR register.
+	 */
+	writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
+	       OARR_VALID, pcie->base + oarr_offset);
+	writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
+
+	/* now program the OMAP registers */
+	writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
+	writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
+
+	dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+		window_idx, oarr_offset, &axi_addr, &pci_addr);
+	dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+		readl(pcie->base + oarr_offset),
+		readl(pcie->base + oarr_offset + 4));
+	dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
+		readl(pcie->base + omap_offset),
+		readl(pcie->base + omap_offset + 4));
+
+	return 0;
+}
+
+/**
+ * Some iProc SoCs require the SW to configure the outbound address mapping
+ *
+ * Outbound address translation:
+ *
+ * iproc_pcie_address = axi_address - axi_offset
+ * OARR = iproc_pcie_address
+ * OMAP = pci_addr
+ *
+ * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
+ */
+static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
+			       u64 pci_addr, resource_size_t size)
+{
+	struct iproc_pcie_ob *ob = &pcie->ob;
+	struct device *dev = pcie->dev;
+	int ret = -EINVAL, window_idx, size_idx;
+
+	if (axi_addr < ob->axi_offset) {
+		dev_err(dev, "axi address %pap less than offset %pap\n",
+			&axi_addr, &ob->axi_offset);
+		return -EINVAL;
+	}
+
+	/*
+	 * Translate the AXI address to the internal address used by the iProc
+	 * PCIe core before programming the OARR
+	 */
+	axi_addr -= ob->axi_offset;
+
+	/* iterate through all OARR/OMAP mapping windows */
+	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
+		const struct iproc_pcie_ob_map *ob_map =
+			&pcie->ob_map[window_idx];
+
+		/*
+		 * If current outbound window is already in use, move on to the
+		 * next one.
+		 */
+		if (iproc_pcie_ob_is_valid(pcie, window_idx))
+			continue;
+
+		/*
+		 * Iterate through all supported window sizes within the
+		 * OARR/OMAP pair to find a match.  Go through the window sizes
+		 * in a descending order.
+		 */
+		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
+		     size_idx--) {
+			resource_size_t window_size =
+				ob_map->window_sizes[size_idx] * SZ_1M;
+
+			/*
+			 * If the remaining size is smaller than this window
+			 * size, keep iterating unless this is the last window
+			 * with the minimal window size at index zero. In that
+			 * case, compromise by mapping with the minimum
+			 * supported window size.
+			 */
+			if (size < window_size) {
+				if (size_idx > 0 || window_idx > 0)
+					continue;
+
+				/*
+				 * For the corner case of reaching the minimal
+				 * window size that can be supported on the
+				 * last window
+				 */
+				axi_addr = ALIGN_DOWN(axi_addr, window_size);
+				pci_addr = ALIGN_DOWN(pci_addr, window_size);
+				size = window_size;
+			}
+
+			if (!IS_ALIGNED(axi_addr, window_size) ||
+			    !IS_ALIGNED(pci_addr, window_size)) {
+				dev_err(dev,
+					"axi %pap or pci %pap not aligned\n",
+					&axi_addr, &pci_addr);
+				return -EINVAL;
+			}
+
+			/*
+			 * Match found!  Program both OARR and OMAP and mark
+			 * them as a valid entry.
+			 */
+			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
+						  axi_addr, pci_addr);
+			if (ret)
+				goto err_ob;
+
+			size -= window_size;
+			if (size == 0)
+				return 0;
+
+			/*
+			 * If we are here, we are done with the current window,
+			 * but not yet finished all mappings.  Need to move on
+			 * to the next window.
+			 */
+			axi_addr += window_size;
+			pci_addr += window_size;
+			break;
+		}
+	}
+
+err_ob:
+	dev_err(dev, "unable to configure outbound mapping\n");
+	dev_err(dev,
+		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
+		&axi_addr, &ob->axi_offset, &pci_addr, &size);
+
+	return ret;
+}
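+
+/*
+ * Worked example of the allocation loop above (hypothetical addresses,
+ * added for illustration): mapping a 384 MB region at axi_addr 0x20000000
+ * (axi_offset 0) on PAXB, whose two OARR/OMAP pairs each support 128 MB
+ * and 256 MB windows:
+ *
+ *   window 1: 256 MB fits (384 MB >= 256 MB), so OARR1/OMAP1 are
+ *             programmed; 128 MB remains and both addresses advance
+ *             by 256 MB
+ *   window 0: 128 MB matches the remaining size exactly, so OARR0/OMAP0
+ *             are programmed and the mapping completes
+ */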
+
+static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
+				 struct list_head *resources)
+{
+	struct device *dev = pcie->dev;
+	struct resource_entry *window;
+	int ret;
+
+	resource_list_for_each_entry(window, resources) {
+		struct resource *res = window->res;
+		u64 res_type = resource_type(res);
+
+		switch (res_type) {
+		case IORESOURCE_IO:
+		case IORESOURCE_BUS:
+			break;
+		case IORESOURCE_MEM:
+			ret = iproc_pcie_setup_ob(pcie, res->start,
+						  res->start - window->offset,
+						  resource_size(res));
+			if (ret)
+				return ret;
+			break;
+		default:
+			dev_err(dev, "invalid resource %pR\n", res);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
+					   int region_idx)
+{
+	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+	u32 val;
+
+	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
+
+	return !!(val & (BIT(ib_map->nr_sizes) - 1));
+}
+
+static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
+					    enum iproc_pcie_ib_map_type type)
+{
+	return !!(ib_map->type == type);
+}
+
+static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
+			       int size_idx, int nr_windows, u64 axi_addr,
+			       u64 pci_addr, resource_size_t size)
+{
+	struct device *dev = pcie->dev;
+	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+	u16 iarr_offset, imap_offset;
+	u32 val;
+	int window_idx;
+
+	iarr_offset = iproc_pcie_reg_offset(pcie,
+				MAP_REG(IPROC_PCIE_IARR0, region_idx));
+	imap_offset = iproc_pcie_reg_offset(pcie,
+				MAP_REG(IPROC_PCIE_IMAP0, region_idx));
+	if (iproc_pcie_reg_is_invalid(iarr_offset) ||
+	    iproc_pcie_reg_is_invalid(imap_offset))
+		return -EINVAL;
+
+	dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+		region_idx, iarr_offset, &axi_addr, &pci_addr);
+
+	/*
+	 * Program the IARR registers.  The upper 32-bit IARR register is
+	 * always right after the lower 32-bit IARR register.
+	 */
+	writel(lower_32_bits(pci_addr) | BIT(size_idx),
+	       pcie->base + iarr_offset);
+	writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
+
+	dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+		readl(pcie->base + iarr_offset),
+		readl(pcie->base + iarr_offset + 4));
+
+	/*
+	 * Now program the IMAP registers.  Each IARR region may have one or
+	 * more IMAP windows.
+	 */
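+	/*
+	 * nr_windows is a power of two in all ib_map tables above, so the
+	 * shift below splits the region size evenly across the windows.
+	 */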
+	size >>= ilog2(nr_windows);
+	for (window_idx = 0; window_idx < nr_windows; window_idx++) {
+		val = readl(pcie->base + imap_offset);
+		val |= lower_32_bits(axi_addr) | IMAP_VALID;
+		writel(val, pcie->base + imap_offset);
+		writel(upper_32_bits(axi_addr),
+		       pcie->base + imap_offset + ib_map->imap_addr_offset);
+
+		dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+			window_idx, readl(pcie->base + imap_offset),
+			readl(pcie->base + imap_offset +
+			      ib_map->imap_addr_offset));
+
+		imap_offset += ib_map->imap_window_offset;
+		axi_addr += size;
+	}
+
+	return 0;
+}
+
+static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
+			       struct of_pci_range *range,
+			       enum iproc_pcie_ib_map_type type)
+{
+	struct device *dev = pcie->dev;
+	struct iproc_pcie_ib *ib = &pcie->ib;
+	int ret;
+	unsigned int region_idx, size_idx;
+	u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
+	resource_size_t size = range->size;
+
+	/* iterate through all IARR mapping regions */
+	for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
+		const struct iproc_pcie_ib_map *ib_map =
+			&pcie->ib_map[region_idx];
+
+		/*
+		 * If current inbound region is already in use or not a
+		 * compatible type, move on to the next.
+		 */
+		if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
+		    !iproc_pcie_ib_check_type(ib_map, type))
+			continue;
+
+		/* iterate through all supported region sizes to find a match */
+		for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
+			resource_size_t region_size =
+			ib_map->region_sizes[size_idx] * ib_map->size_unit;
+
+			if (size != region_size)
+				continue;
+
+			if (!IS_ALIGNED(axi_addr, region_size) ||
+			    !IS_ALIGNED(pci_addr, region_size)) {
+				dev_err(dev,
+					"axi %pap or pci %pap not aligned\n",
+					&axi_addr, &pci_addr);
+				return -EINVAL;
+			}
+
+			/* Match found!  Program IARR and all IMAP windows. */
+			ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
+						  ib_map->nr_windows, axi_addr,
+						  pci_addr, size);
+			if (ret)
+				goto err_ib;
+			else
+				return 0;
+
+		}
+	}
+	ret = -EINVAL;
+
+err_ib:
+	dev_err(dev, "unable to configure inbound mapping\n");
+	dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
+		&axi_addr, &pci_addr, &size);
+
+	return ret;
+}
+
+static int iproc_pcie_add_dma_range(struct device *dev,
+				    struct list_head *resources,
+				    struct of_pci_range *range)
+{
+	struct resource *res;
+	struct resource_entry *entry, *tmp;
+	struct list_head *head = resources;
+
+	res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
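+	/* keep the list sorted: insert after the last entry below cpu_addr */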
+	resource_list_for_each_entry(tmp, resources) {
+		if (tmp->res->start < range->cpu_addr)
+			head = &tmp->node;
+	}
+
+	res->start = range->cpu_addr;
+	res->end = res->start + range->size - 1;
+
+	entry = resource_list_create_entry(res, 0);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->offset = res->start - range->cpu_addr;
+	resource_list_add(entry, head);
+
+	return 0;
+}
+
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	int ret;
+	LIST_HEAD(resources);
+
+	/* Get the dma-ranges from DT */
+	ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+	if (ret)
+		return ret;
+
+	for_each_of_pci_range(&parser, &range) {
+		ret = iproc_pcie_add_dma_range(pcie->dev,
+					       &resources,
+					       &range);
+		if (ret)
+			goto out;
+		/* Each range entry corresponds to an inbound mapping region */
+		ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
+		if (ret)
+			goto out;
+	}
+
+	list_splice_init(&resources, &host->dma_ranges);
+
+	return 0;
+out:
+	pci_free_resource_list(&resources);
+	return ret;
+}
+
+static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
+			       struct device_node *msi_node,
+			       u64 *msi_addr)
+{
+	struct device *dev = pcie->dev;
+	int ret;
+	struct resource res;
+
+	/*
+	 * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
+	 * supported external MSI controller that requires steering.
+	 */
+	if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
+		dev_err(dev, "unable to find compatible MSI controller\n");
+		return -ENODEV;
+	}
+
+	/* derive GITS_TRANSLATER address from GICv3 */
+	ret = of_address_to_resource(msi_node, 0, &res);
+	if (ret < 0) {
+		dev_err(dev, "unable to obtain MSI controller resources\n");
+		return ret;
+	}
+
+	*msi_addr = res.start + GITS_TRANSLATER;
+	return 0;
+}
+
+static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+	int ret;
+	struct of_pci_range range;
+
+	memset(&range, 0, sizeof(range));
+	range.size = SZ_32K;
+	range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+
+	ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+	return ret;
+}
+
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
+					 bool enable)
+{
+	u32 val;
+
+	if (!enable) {
+		/*
+		 * Disable PAXC MSI steering. All write transfers will be
+		 * treated as non-MSI transfers
+		 */
+		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+		val &= ~MSI_ENABLE_CFG;
+		iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+		return;
+	}
+
+	/*
+	 * Program bits [43:13] of address of GITS_TRANSLATER register into
+	 * bits [30:0] of the MSI base address register.  In fact, in all iProc
+	 * based SoCs, all I/O register bases are well below the 32-bit
+	 * boundary, so we can safely assume bits [43:32] are always zeros.
+	 */
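+	/*
+	 * e.g. (hypothetical address): a GITS_TRANSLATER at 0x63c30000 would
+	 * be programmed as 0x63c30000 >> 13 == 0x31e18
+	 */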
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
+			     (u32)(msi_addr >> 13));
+
+	/* use a default 8K window size */
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
+
+	/* steer MSI writes to GICv3 ITS */
+	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
+	val |= GIC_V3_CFG;
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
+
+	/*
+	 * Program bits [43:2] of address of GITS_TRANSLATER register into the
+	 * iProc MSI address registers.
+	 */
+	msi_addr >>= 2;
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
+			     upper_32_bits(msi_addr));
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
+			     lower_32_bits(msi_addr));
+
+	/* enable MSI */
+	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+	val |= MSI_ENABLE_CFG;
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+}
+
+static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
+				struct device_node *msi_node)
+{
+	struct device *dev = pcie->dev;
+	int ret;
+	u64 msi_addr;
+
+	ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
+	if (ret < 0) {
+		dev_err(dev, "msi steering failed\n");
+		return ret;
+	}
+
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXB_V2:
+		ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
+		if (ret)
+			return ret;
+		break;
+	case IPROC_PCIE_PAXC_V2:
+		iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
+{
+	struct device_node *msi_node;
+	int ret;
+
+	/*
+	 * Either the "msi-parent" or the "msi-map" phandle needs to exist
+	 * for us to obtain the MSI node.
+	 */
+
+	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
+	if (!msi_node) {
+		const __be32 *msi_map = NULL;
+		int len;
+		u32 phandle;
+
+		msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
+		if (!msi_map)
+			return -ENODEV;
+
+		phandle = be32_to_cpup(msi_map + 1);
+		msi_node = of_find_node_by_phandle(phandle);
+		if (!msi_node)
+			return -ENODEV;
+	}
+
+	/*
+	 * Certain revisions of the iProc PCIe controller require additional
+	 * configurations to steer the MSI writes towards an external MSI
+	 * controller.
+	 */
+	if (pcie->need_msi_steer) {
+		ret = iproc_pcie_msi_steer(pcie, msi_node);
+		if (ret)
+			goto out_put_node;
+	}
+
+	/*
+	 * If another MSI controller is being used, the call below should fail
+	 * but that is okay
+	 */
+	ret = iproc_msi_init(pcie, msi_node);
+
+out_put_node:
+	of_node_put(msi_node);
+	return ret;
+}
+
+static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
+{
+	iproc_msi_exit(pcie);
+}
+
+static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	unsigned int reg_idx;
+	const u16 *regs;
+
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXB_BCMA:
+		regs = iproc_pcie_reg_paxb_bcma;
+		break;
+	case IPROC_PCIE_PAXB:
+		regs = iproc_pcie_reg_paxb;
+		pcie->has_apb_err_disable = true;
+		if (pcie->need_ob_cfg) {
+			pcie->ob_map = paxb_ob_map;
+			pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
+		}
+		break;
+	case IPROC_PCIE_PAXB_V2:
+		regs = iproc_pcie_reg_paxb_v2;
+		pcie->iproc_cfg_read = true;
+		pcie->has_apb_err_disable = true;
+		if (pcie->need_ob_cfg) {
+			pcie->ob_map = paxb_v2_ob_map;
+			pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
+		}
+		pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
+		pcie->ib_map = paxb_v2_ib_map;
+		pcie->need_msi_steer = true;
+		dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
+			 CFG_RETRY_STATUS);
+		break;
+	case IPROC_PCIE_PAXC:
+		regs = iproc_pcie_reg_paxc;
+		pcie->ep_is_internal = true;
+		pcie->iproc_cfg_read = true;
+		pcie->rej_unconfig_pf = true;
+		break;
+	case IPROC_PCIE_PAXC_V2:
+		regs = iproc_pcie_reg_paxc_v2;
+		pcie->ep_is_internal = true;
+		pcie->iproc_cfg_read = true;
+		pcie->rej_unconfig_pf = true;
+		pcie->need_msi_steer = true;
+		break;
+	default:
+		dev_err(dev, "incompatible iProc PCIe interface\n");
+		return -EINVAL;
+	}
+
+	pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
+					 sizeof(*pcie->reg_offsets),
+					 GFP_KERNEL);
+	if (!pcie->reg_offsets)
+		return -ENOMEM;
+
+	/* go through the register table and populate all valid registers */
+	pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
+		IPROC_PCIE_REG_INVALID : regs[0];
+	for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
+		pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
+			regs[reg_idx] : IPROC_PCIE_REG_INVALID;
+
+	return 0;
+}
+
+int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
+{
+	struct device *dev;
+	int ret;
+	struct pci_bus *child;
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+	dev = pcie->dev;
+
+	ret = iproc_pcie_rev_init(pcie);
+	if (ret) {
+		dev_err(dev, "unable to initialize controller parameters\n");
+		return ret;
+	}
+
+	ret = devm_request_pci_bus_resources(dev, res);
+	if (ret)
+		return ret;
+
+	ret = phy_init(pcie->phy);
+	if (ret) {
+		dev_err(dev, "unable to initialize PCIe PHY\n");
+		return ret;
+	}
+
+	ret = phy_power_on(pcie->phy);
+	if (ret) {
+		dev_err(dev, "unable to power on PCIe PHY\n");
+		goto err_exit_phy;
+	}
+
+	iproc_pcie_perst_ctrl(pcie, true);
+	iproc_pcie_perst_ctrl(pcie, false);
+
+	if (pcie->need_ob_cfg) {
+		ret = iproc_pcie_map_ranges(pcie, res);
+		if (ret) {
+			dev_err(dev, "map failed\n");
+			goto err_power_off_phy;
+		}
+	}
+
+	if (pcie->need_ib_cfg) {
+		ret = iproc_pcie_map_dma_ranges(pcie);
+		if (ret && ret != -ENOENT)
+			goto err_power_off_phy;
+	}
+
+	ret = iproc_pcie_check_link(pcie);
+	if (ret) {
+		dev_err(dev, "no PCIe EP device detected\n");
+		goto err_power_off_phy;
+	}
+
+	iproc_pcie_enable(pcie);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		if (iproc_pcie_msi_enable(pcie))
+			dev_info(dev, "not using iProc MSI\n");
+
+	list_splice_init(res, &host->windows);
+	host->busnr = 0;
+	host->dev.parent = dev;
+	host->ops = &iproc_pcie_ops;
+	host->sysdata = pcie;
+	host->map_irq = pcie->map_irq;
+	host->swizzle_irq = pci_common_swizzle;
+
+	ret = pci_scan_root_bus_bridge(host);
+	if (ret < 0) {
+		dev_err(dev, "failed to scan host: %d\n", ret);
+		goto err_power_off_phy;
+	}
+
+	pci_assign_unassigned_bus_resources(host->bus);
+
+	pcie->root_bus = host->bus;
+
+	list_for_each_entry(child, &host->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(host->bus);
+
+	return 0;
+
+err_power_off_phy:
+	phy_power_off(pcie->phy);
+err_exit_phy:
+	phy_exit(pcie->phy);
+	return ret;
+}
+EXPORT_SYMBOL(iproc_pcie_setup);
+
+int iproc_pcie_remove(struct iproc_pcie *pcie)
+{
+	pci_stop_root_bus(pcie->root_bus);
+	pci_remove_root_bus(pcie->root_bus);
+
+	iproc_pcie_msi_disable(pcie);
+
+	phy_power_off(pcie->phy);
+	phy_exit(pcie->phy);
+
+	return 0;
+}
+EXPORT_SYMBOL(iproc_pcie_remove);
+
+/*
+ * The MSI parsing logic in certain revisions of Broadcom PAXC-based root
+ * complexes does not work and needs to be disabled
+ */
+static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
+{
+	struct iproc_pcie *pcie = iproc_data(pdev->bus);
+
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
+			quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
+			quirk_paxc_disable_msi_parsing);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
+			quirk_paxc_disable_msi_parsing);
+
+static void quirk_paxc_bridge(struct pci_dev *pdev)
+{
+	/*
+	 * The PCI config space is shared with the PAXC root port and the first
+	 * Ethernet device.  So, we need to work around this by telling the PCI
+	 * code that the bridge is not an Ethernet device.
+	 */
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		pdev->class = PCI_CLASS_BRIDGE_PCI << 8;
+
+	/*
+	 * MPSS is not being set properly (as it is currently 0).  This is
+	 * because that area of the PCI config space is hard coded to zero, and
+	 * is not modifiable by firmware.  Set this to 2 (i.e., a 512 byte MPS)
+	 * so that the MPS can be set to the real max value.
+	 */
+	pdev->pcie_mpss = 2;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-iproc.h b/marvell/linux/drivers/pci/controller/pcie-iproc.h
new file mode 100644
index 0000000..4f03ea5
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-iproc.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ */
+
+#ifndef _PCIE_IPROC_H
+#define _PCIE_IPROC_H
+
+/**
+ * iProc PCIe interface type
+ *
+ * PAXB is the wrapper used in a root complex that can be connected to an
+ * external endpoint device.
+ *
+ * PAXC is the wrapper used in a root complex dedicated to internal emulated
+ * endpoint devices.
+ */
+enum iproc_pcie_type {
+	IPROC_PCIE_PAXB_BCMA = 0,
+	IPROC_PCIE_PAXB,
+	IPROC_PCIE_PAXB_V2,
+	IPROC_PCIE_PAXC,
+	IPROC_PCIE_PAXC_V2,
+};
+
+/**
+ * iProc PCIe outbound mapping
+ * @axi_offset: offset from the AXI address to the internal address used by
+ * the iProc PCIe core
+ * @nr_windows: total number of supported outbound mapping windows
+ */
+struct iproc_pcie_ob {
+	resource_size_t axi_offset;
+	unsigned int nr_windows;
+};
+
+/**
+ * iProc PCIe inbound mapping
+ * @nr_regions: total number of supported inbound mapping regions
+ */
+struct iproc_pcie_ib {
+	unsigned int nr_regions;
+};
+
+struct iproc_pcie_ob_map;
+struct iproc_pcie_ib_map;
+struct iproc_msi;
+
+/**
+ * iProc PCIe device
+ *
+ * @dev: pointer to device data structure
+ * @type: iProc PCIe interface type
+ * @reg_offsets: register offsets
+ * @base: PCIe host controller I/O register base
+ * @base_addr: PCIe host controller register base physical address
+ * @root_bus: pointer to root bus
+ * @phy: optional PHY device that controls the Serdes
+ * @map_irq: function callback to map interrupts
+ * @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @iproc_cfg_read: indicates the iProc config read function should be used
+ * @rej_unconfig_pf: indicates the root complex needs to detect and reject
+ * enumeration against unconfigured physical functions emulated in the ASIC
+ * @has_apb_err_disable: indicates the controller can be configured to prevent
+ * unsupported requests from being forwarded as APB bus errors
+ * @fix_paxc_cap: indicates the controller has a corrupted capability list in
+ * its config space registers and requires a SW-based fixup
+ *
+ * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
+ * @ob: outbound mapping related parameters
+ * @ob_map: outbound mapping related parameters specific to the controller
+ *
+ * @need_ib_cfg: indicates SW needs to configure the inbound mapping window
+ * @ib: inbound mapping related parameters
+ * @ib_map: inbound mapping region related parameters
+ *
+ * @need_msi_steer: indicates additional configuration of the iProc PCIe
+ * controller is required to steer MSI writes to an external interrupt
+ * controller
+ * @msi: MSI data
+ */
+struct iproc_pcie {
+	struct device *dev;
+	enum iproc_pcie_type type;
+	u16 *reg_offsets;
+	void __iomem *base;
+	phys_addr_t base_addr;
+	struct resource mem;
+	struct pci_bus *root_bus;
+	struct phy *phy;
+	int (*map_irq)(const struct pci_dev *, u8, u8);
+	bool ep_is_internal;
+	bool iproc_cfg_read;
+	bool rej_unconfig_pf;
+	bool has_apb_err_disable;
+	bool fix_paxc_cap;
+
+	bool need_ob_cfg;
+	struct iproc_pcie_ob ob;
+	const struct iproc_pcie_ob_map *ob_map;
+
+	bool need_ib_cfg;
+	struct iproc_pcie_ib ib;
+	const struct iproc_pcie_ib_map *ib_map;
+
+	bool need_msi_steer;
+	struct iproc_msi *msi;
+};
+
+int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
+int iproc_pcie_remove(struct iproc_pcie *pcie);
+int iproc_pcie_shutdown(struct iproc_pcie *pcie);
+
+#ifdef CONFIG_PCIE_IPROC_MSI
+int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
+void iproc_msi_exit(struct iproc_pcie *pcie);
+#else
+static inline int iproc_msi_init(struct iproc_pcie *pcie,
+				 struct device_node *node)
+{
+	return -ENODEV;
+}
+static inline void iproc_msi_exit(struct iproc_pcie *pcie)
+{
+}
+#endif
+
+#endif /* _PCIE_IPROC_H */
diff --git a/marvell/linux/drivers/pci/controller/pcie-mediatek.c b/marvell/linux/drivers/pci/controller/pcie-mediatek.c
new file mode 100644
index 0000000..ff22bca
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-mediatek.c
@@ -0,0 +1,1263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *	   Honghui Zhang <honghui.zhang@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+
+/* PCIe shared registers */
+#define PCIE_SYS_CFG		0x00
+#define PCIE_INT_ENABLE		0x0c
+#define PCIE_CFG_ADDR		0x20
+#define PCIE_CFG_DATA		0x24
+
+/* PCIe per port registers */
+#define PCIE_BAR0_SETUP		0x10
+#define PCIE_CLASS		0x34
+#define PCIE_LINK_STATUS	0x50
+
+#define PCIE_PORT_INT_EN(x)	BIT(20 + (x))
+#define PCIE_PORT_PERST(x)	BIT(1 + (x))
+#define PCIE_PORT_LINKUP	BIT(0)
+#define PCIE_BAR_MAP_MAX	GENMASK(31, 16)
+
+#define PCIE_BAR_ENABLE		BIT(0)
+#define PCIE_REVISION_ID	BIT(0)
+#define PCIE_CLASS_CODE		(0x60400 << 8)
+#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
+				((((regn) >> 8) & GENMASK(3, 0)) << 24))
+#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
+#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
+#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
+#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
+	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
+	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
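+
+/*
+ * For illustration: PCIE_CONF_ADDR(0x10, 0, 0, 1) == 0x00010010, i.e.
+ * register 0x10 of function 0, device 0 on bus 1.
+ */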
+
+/* MediaTek specific configuration registers */
+#define PCIE_FTS_NUM		0x70c
+#define PCIE_FTS_NUM_MASK	GENMASK(15, 8)
+#define PCIE_FTS_NUM_L0(x)	((x) & 0xff << 8)
+
+#define PCIE_FC_CREDIT		0x73c
+#define PCIE_FC_CREDIT_MASK	(GENMASK(31, 31) | GENMASK(28, 16))
+#define PCIE_FC_CREDIT_VAL(x)	((x) << 16)
+
+/* PCIe V2 share registers */
+#define PCIE_SYS_CFG_V2		0x0
+#define PCIE_CSR_LTSSM_EN(x)	BIT(0 + (x) * 8)
+#define PCIE_CSR_ASPM_L1_EN(x)	BIT(1 + (x) * 8)
+
+/* PCIe V2 per-port registers */
+#define PCIE_MSI_VECTOR		0x0c0
+
+#define PCIE_CONF_VEND_ID	0x100
+#define PCIE_CONF_DEVICE_ID	0x102
+#define PCIE_CONF_CLASS_ID	0x106
+
+#define PCIE_INT_MASK		0x420
+#define INTX_MASK		GENMASK(19, 16)
+#define INTX_SHIFT		16
+#define PCIE_INT_STATUS		0x424
+#define MSI_STATUS		BIT(23)
+#define PCIE_IMSI_STATUS	0x42c
+#define PCIE_IMSI_ADDR		0x430
+#define MSI_MASK		BIT(23)
+#define MTK_MSI_IRQS_NUM	32
+
+#define PCIE_AHB_TRANS_BASE0_L	0x438
+#define PCIE_AHB_TRANS_BASE0_H	0x43c
+#define AHB2PCIE_SIZE(x)	((x) & GENMASK(4, 0))
+#define PCIE_AXI_WINDOW0	0x448
+#define WIN_ENABLE		BIT(7)
+/*
+ * Define the PCIe-to-AHB window size as 2^33 to support up to 8GB of address
+ * space translation, allowing at least 4GB of DRAM to be accessed by EP DMA
+ * (physical DRAM starts at 0x40000000).
+ */
+#define PCIE2AHB_SIZE	0x21
+
+/* PCIe V2 configuration transaction header */
+#define PCIE_CFG_HEADER0	0x460
+#define PCIE_CFG_HEADER1	0x464
+#define PCIE_CFG_HEADER2	0x468
+#define PCIE_CFG_WDATA		0x470
+#define PCIE_APP_TLP_REQ	0x488
+#define PCIE_CFG_RDATA		0x48c
+#define APP_CFG_REQ		BIT(0)
+#define APP_CPL_STATUS		GENMASK(7, 5)
+
+#define CFG_WRRD_TYPE_0		4
+#define CFG_WR_FMT		2
+#define CFG_RD_FMT		0
+
+#define CFG_DW0_LENGTH(length)	((length) & GENMASK(9, 0))
+#define CFG_DW0_TYPE(type)	(((type) << 24) & GENMASK(28, 24))
+#define CFG_DW0_FMT(fmt)	(((fmt) << 29) & GENMASK(31, 29))
+#define CFG_DW2_REGN(regn)	((regn) & GENMASK(11, 2))
+#define CFG_DW2_FUN(fun)	(((fun) << 16) & GENMASK(18, 16))
+#define CFG_DW2_DEV(dev)	(((dev) << 19) & GENMASK(23, 19))
+#define CFG_DW2_BUS(bus)	(((bus) << 24) & GENMASK(31, 24))
+#define CFG_HEADER_DW0(type, fmt) \
+	(CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt))
+#define CFG_HEADER_DW1(where, size) \
+	(GENMASK(((size) - 1), 0) << ((where) & 0x3))
+#define CFG_HEADER_DW2(regn, fun, dev, bus) \
+	(CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \
+	CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus))
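+
+/*
+ * For illustration: CFG_HEADER_DW1() yields what is effectively the first
+ * byte-enable field of the config TLP, e.g. where = 1 and size = 2 give
+ * GENMASK(1, 0) << 1 == 0x6.
+ */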
+
+#define PCIE_RST_CTRL		0x510
+#define PCIE_PHY_RSTB		BIT(0)
+#define PCIE_PIPE_SRSTB		BIT(1)
+#define PCIE_MAC_SRSTB		BIT(2)
+#define PCIE_CRSTB		BIT(3)
+#define PCIE_PERSTB		BIT(8)
+#define PCIE_LINKDOWN_RST_EN	GENMASK(15, 13)
+#define PCIE_LINK_STATUS_V2	0x804
+#define PCIE_PORT_LINKUP_V2	BIT(10)
+
+struct mtk_pcie_port;
+
+/**
+ * struct mtk_pcie_soc - differentiate between host generations
+ * @need_fix_class_id: whether this host's class ID needs to be fixed or not
+ * @need_fix_device_id: whether this host's device ID needs to be fixed or not
+ * @device_id: the device ID to be used when fixing up this host
+ * @ops: pointer to configuration access functions
+ * @startup: pointer to controller setting functions
+ * @setup_irq: pointer to initialize IRQ functions
+ */
+struct mtk_pcie_soc {
+	bool need_fix_class_id;
+	bool need_fix_device_id;
+	unsigned int device_id;
+	struct pci_ops *ops;
+	int (*startup)(struct mtk_pcie_port *port);
+	int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
+};
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @base: IO mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @reset: pointer to port reset control
+ * @sys_ck: pointer to transaction/data link layer clock
+ * @ahb_ck: pointer to AHB slave interface operating clock for CSR access
+ *          and RC initiated MMIO access
+ * @axi_ck: pointer to application layer MMIO channel operating clock
+ * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock
+ *          when pcie_mac_ck/pcie_pipe_ck is turned off
+ * @obff_ck: pointer to OBFF functional block operating clock
+ * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock
+ * @phy: pointer to PHY control block
+ * @slot: port slot
+ * @irq: GIC irq
+ * @irq_domain: legacy INTx IRQ domain
+ * @inner_domain: inner IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @lock: protect the msi_irq_in_use bitmap
+ * @msi_irq_in_use: bit map for assigned MSI IRQ
+ */
+struct mtk_pcie_port {
+	void __iomem *base;
+	struct list_head list;
+	struct mtk_pcie *pcie;
+	struct reset_control *reset;
+	struct clk *sys_ck;
+	struct clk *ahb_ck;
+	struct clk *axi_ck;
+	struct clk *aux_ck;
+	struct clk *obff_ck;
+	struct clk *pipe_ck;
+	struct phy *phy;
+	u32 slot;
+	int irq;
+	struct irq_domain *irq_domain;
+	struct irq_domain *inner_domain;
+	struct irq_domain *msi_domain;
+	struct mutex lock;
+	DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
+};
+
+/**
+ * struct mtk_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @free_ck: free-run reference clock
+ * @mem: non-prefetchable memory resource
+ * @ports: pointer to PCIe port information
+ * @soc: pointer to SoC-dependent operations
+ * @busnr: root bus number
+ */
+struct mtk_pcie {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *free_ck;
+
+	struct resource mem;
+	struct list_head ports;
+	const struct mtk_pcie_soc *soc;
+	unsigned int busnr;
+};
+
+static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+
+	clk_disable_unprepare(pcie->free_ck);
+
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+}
+
+static void mtk_pcie_port_free(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
+
+	devm_iounmap(dev, port->base);
+	list_del(&port->list);
+	devm_kfree(dev, port);
+}
+
+static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
+{
+	struct mtk_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		phy_power_off(port->phy);
+		phy_exit(port->phy);
+		clk_disable_unprepare(port->pipe_ck);
+		clk_disable_unprepare(port->obff_ck);
+		clk_disable_unprepare(port->axi_ck);
+		clk_disable_unprepare(port->aux_ck);
+		clk_disable_unprepare(port->ahb_ck);
+		clk_disable_unprepare(port->sys_ck);
+		mtk_pcie_port_free(port);
+	}
+
+	mtk_pcie_subsys_powerdown(pcie);
+}
+
+static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port)
+{
+	u32 val;
+	int err;
+
+	err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val,
+					!(val & APP_CFG_REQ), 10,
+					100 * USEC_PER_MSEC);
+	if (err)
+		return PCIBIOS_SET_FAILED;
+
+	if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS)
+		return PCIBIOS_SET_FAILED;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
+			      int where, int size, u32 *val)
+{
+	u32 tmp;
+
+	/* Write PCIe configuration transaction header for Cfgrd */
+	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT),
+	       port->base + PCIE_CFG_HEADER0);
+	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
+	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
+	       port->base + PCIE_CFG_HEADER2);
+
+	/* Trigger h/w to transmit Cfgrd TLP */
+	tmp = readl(port->base + PCIE_APP_TLP_REQ);
+	tmp |= APP_CFG_REQ;
+	writel(tmp, port->base + PCIE_APP_TLP_REQ);
+
+	/* Check completion status */
+	if (mtk_pcie_check_cfg_cpld(port))
+		return PCIBIOS_SET_FAILED;
+
+	/* Read cpld payload of Cfgrd */
+	*val = readl(port->base + PCIE_CFG_RDATA);
+
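+	/*
+	 * The completion payload is always a full DW; extract the bytes that
+	 * were requested, e.g. a 2-byte read at where = 0x6 returns bits
+	 * [31:16] of the DW.
+	 */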
+	if (size == 1)
+		*val = (*val >> (8 * (where & 3))) & 0xff;
+	else if (size == 2)
+		*val = (*val >> (8 * (where & 3))) & 0xffff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn,
+			      int where, int size, u32 val)
+{
+	/* Write PCIe configuration transaction header for Cfgwr */
+	writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT),
+	       port->base + PCIE_CFG_HEADER0);
+	writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1);
+	writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus),
+	       port->base + PCIE_CFG_HEADER2);
+
+	/* Write Cfgwr data */
+	val = val << 8 * (where & 3);
+	writel(val, port->base + PCIE_CFG_WDATA);
+
+	/* Trigger h/w to transmit Cfgwr TLP */
+	val = readl(port->base + PCIE_APP_TLP_REQ);
+	val |= APP_CFG_REQ;
+	writel(val, port->base + PCIE_APP_TLP_REQ);
+
+	/* Check completion status */
+	return mtk_pcie_check_cfg_cpld(port);
+}
+
+static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
+						unsigned int devfn)
+{
+	struct mtk_pcie *pcie = bus->sysdata;
+	struct mtk_pcie_port *port;
+	struct pci_dev *dev = NULL;
+
+	/*
+	 * Walk the bus hierarchy to get the devfn value
+	 * of the port in the root bus.
+	 */
+	while (bus && bus->number) {
+		dev = bus->self;
+		bus = dev->bus;
+		devfn = dev->devfn;
+	}
+
+	list_for_each_entry(port, &pcie->ports, list)
+		if (port->slot == PCI_SLOT(devfn))
+			return port;
+
+	return NULL;
+}
+
+static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 *val)
+{
+	struct mtk_pcie_port *port;
+	u32 bn = bus->number;
+	int ret;
+
+	port = mtk_pcie_find_port(bus, devfn);
+	if (!port) {
+		*val = ~0;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
+	if (ret)
+		*val = ~0;
+
+	return ret;
+}
+
+static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+				 int where, int size, u32 val)
+{
+	struct mtk_pcie_port *port;
+	u32 bn = bus->number;
+
+	port = mtk_pcie_find_port(bus, devfn);
+	if (!port)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val);
+}
+
+static struct pci_ops mtk_pcie_ops_v2 = {
+	.read  = mtk_pcie_config_read,
+	.write = mtk_pcie_config_write,
+};
+
+static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	phys_addr_t addr;
+
+	/* MT2712/MT7622 only support 32-bit MSI addresses */
+	addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+	msg->address_hi = 0;
+	msg->address_lo = lower_32_bits(addr);
+
+	msg->data = data->hwirq;
+
+	dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mtk_msi_set_affinity(struct irq_data *irq_data,
+				const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static void mtk_msi_ack_irq(struct irq_data *data)
+{
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+	u32 hwirq = data->hwirq;
+
+	writel(1 << hwirq, port->base + PCIE_IMSI_STATUS);
+}
+
+static struct irq_chip mtk_msi_bottom_irq_chip = {
+	.name			= "MTK MSI",
+	.irq_compose_msi_msg	= mtk_compose_msi_msg,
+	.irq_set_affinity	= mtk_msi_set_affinity,
+	.irq_ack		= mtk_msi_ack_irq,
+};
+
+static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				     unsigned int nr_irqs, void *args)
+{
+	struct mtk_pcie_port *port = domain->host_data;
+	unsigned long bit;
+
+	WARN_ON(nr_irqs != 1);
+	mutex_lock(&port->lock);
+
+	bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+	if (bit >= MTK_MSI_IRQS_NUM) {
+		mutex_unlock(&port->lock);
+		return -ENOSPC;
+	}
+
+	__set_bit(bit, port->msi_irq_in_use);
+
+	mutex_unlock(&port->lock);
+
+	irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+			    domain->host_data, handle_edge_irq,
+			    NULL, NULL);
+
+	return 0;
+}
+
+static void mtk_pcie_irq_domain_free(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d);
+
+	mutex_lock(&port->lock);
+
+	if (!test_bit(d->hwirq, port->msi_irq_in_use))
+		dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n",
+			d->hwirq);
+	else
+		__clear_bit(d->hwirq, port->msi_irq_in_use);
+
+	mutex_unlock(&port->lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc	= mtk_pcie_irq_domain_alloc,
+	.free	= mtk_pcie_irq_domain_free,
+};
+
+static struct irq_chip mtk_msi_irq_chip = {
+	.name		= "MTK PCIe MSI",
+	.irq_ack	= irq_chip_ack_parent,
+	.irq_mask	= pci_msi_mask_irq,
+	.irq_unmask	= pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mtk_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX),
+	.chip	= &mtk_msi_irq_chip,
+};
+
+static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
+{
+	struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+
+	mutex_init(&port->lock);
+
+	port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
+						      &msi_domain_ops, port);
+	if (!port->inner_domain) {
+		dev_err(port->pcie->dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
+						     port->inner_domain);
+	if (!port->msi_domain) {
+		dev_err(port->pcie->dev, "failed to create MSI domain\n");
+		irq_domain_remove(port->inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+{
+	u32 val;
+	phys_addr_t msg_addr;
+
+	msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR);
+	val = lower_32_bits(msg_addr);
+	writel(val, port->base + PCIE_IMSI_ADDR);
+
+	val = readl(port->base + PCIE_INT_MASK);
+	val &= ~MSI_MASK;
+	writel(val, port->base + PCIE_INT_MASK);
+}
+
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+	struct mtk_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+		if (port->irq_domain)
+			irq_domain_remove(port->irq_domain);
+
+		if (IS_ENABLED(CONFIG_PCI_MSI)) {
+			if (port->msi_domain)
+				irq_domain_remove(port->msi_domain);
+			if (port->inner_domain)
+				irq_domain_remove(port->inner_domain);
+		}
+
+		irq_dispose_mapping(port->irq);
+	}
+}
+
+static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+			     irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = mtk_pcie_intx_map,
+};
+
+static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
+				    struct device_node *node)
+{
+	struct device *dev = port->pcie->dev;
+	struct device_node *pcie_intc_node;
+	int ret;
+
+	/* Setup INTx */
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "no PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+						 &intx_domain_ops, port);
+	of_node_put(pcie_intc_node);
+	if (!port->irq_domain) {
+		dev_err(dev, "failed to get INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		ret = mtk_pcie_allocate_msi_domains(port);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_pcie_intr_handler(struct irq_desc *desc)
+{
+	struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+	unsigned long status;
+	u32 virq;
+	u32 bit = INTX_SHIFT;
+
+	chained_irq_enter(irqchip, desc);
+
+	status = readl(port->base + PCIE_INT_STATUS);
+	if (status & INTX_MASK) {
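+		/* INTx status lives in bits [INTX_SHIFT..INTX_SHIFT + 3] */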
+		for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
+			/* Clear the INTx */
+			writel(1 << bit, port->base + PCIE_INT_STATUS);
+			virq = irq_find_mapping(port->irq_domain,
+						bit - INTX_SHIFT);
+			generic_handle_irq(virq);
+		}
+	}
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (status & MSI_STATUS) {
+			unsigned long imsi_status;
+
+			/*
+			 * The interrupt status can be cleared even if the
+			 * MSI status remains pending. As such, given the
+			 * edge-triggered interrupt type, its status should
+			 * be cleared before being dispatched to the
+			 * handler of the underlying device.
+			 */
+			writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+			while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+				for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
+					virq = irq_find_mapping(port->inner_domain, bit);
+					generic_handle_irq(virq);
+				}
+			}
+		}
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
+			      struct device_node *node)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	int err;
+
+	err = mtk_pcie_init_irq_domain(port, node);
+	if (err) {
+		dev_err(dev, "failed to init PCIe IRQ domain\n");
+		return err;
+	}
+
+	port->irq = platform_get_irq(pdev, port->slot);
+	if (port->irq < 0)
+		return port->irq;
+
+	irq_set_chained_handler_and_data(port->irq,
+					 mtk_pcie_intr_handler, port);
+
+	return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct resource *mem = &pcie->mem;
+	const struct mtk_pcie_soc *soc = port->pcie->soc;
+	u32 val;
+	int err;
+
+	/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+	if (pcie->base) {
+		val = readl(pcie->base + PCIE_SYS_CFG_V2);
+		val |= PCIE_CSR_LTSSM_EN(port->slot) |
+		       PCIE_CSR_ASPM_L1_EN(port->slot);
+		writel(val, pcie->base + PCIE_SYS_CFG_V2);
+	}
+
+	/* Assert all reset signals */
+	writel(0, port->base + PCIE_RST_CTRL);
+
+	/*
+	 * Enable PCIe link down reset: if the link status changes from link
+	 * up to link down, this will reset the MAC control registers and
+	 * configuration space.
+	 */
+	writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+	/* De-assert PHY, PE, PIPE, MAC and configuration reset */
+	val = readl(port->base + PCIE_RST_CTRL);
+	val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+	       PCIE_MAC_SRSTB | PCIE_CRSTB;
+	writel(val, port->base + PCIE_RST_CTRL);
+
+	/* Set up vendor ID and class code */
+	if (soc->need_fix_class_id) {
+		val = PCI_VENDOR_ID_MEDIATEK;
+		writew(val, port->base + PCIE_CONF_VEND_ID);
+
+		val = PCI_CLASS_BRIDGE_PCI;
+		writew(val, port->base + PCIE_CONF_CLASS_ID);
+	}
+
+	if (soc->need_fix_device_id)
+		writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
+
+	/* 100ms timeout value should be enough for Gen1/2 training */
+	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+				 !!(val & PCIE_PORT_LINKUP_V2), 20,
+				 100 * USEC_PER_MSEC);
+	if (err)
+		return -ETIMEDOUT;
+
+	/* Set INTx mask */
+	val = readl(port->base + PCIE_INT_MASK);
+	val &= ~INTX_MASK;
+	writel(val, port->base + PCIE_INT_MASK);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		mtk_pcie_enable_msi(port);
+
+	/* Set AHB to PCIe translation windows */
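+	/*
+	 * The window size below is encoded from fls() of the region size,
+	 * i.e. the position of its highest set bit, so a power-of-two sized
+	 * region is assumed.
+	 */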
+	val = lower_32_bits(mem->start) |
+	      AHB2PCIE_SIZE(fls(resource_size(mem)));
+	writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+	val = upper_32_bits(mem->start);
+	writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+	/* Set PCIe to AXI translation memory space. */
+	val = PCIE2AHB_SIZE | WIN_ENABLE;
+	writel(val, port->base + PCIE_AXI_WINDOW0);
+
+	return 0;
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
+				      unsigned int devfn, int where)
+{
+	struct mtk_pcie *pcie = bus->sysdata;
+
+	writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
+			      bus->number), pcie->base + PCIE_CFG_ADDR);
+
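+	/*
+	 * The data register is one DW wide; the (where & 3) offset lets
+	 * pci_generic_config_read/write hit the correct byte lane for
+	 * sub-word accesses.
+	 */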
+	return pcie->base + PCIE_CFG_DATA + (where & 3);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+	.map_bus = mtk_pcie_map_bus,
+	.read  = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
+
+static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	u32 func = PCI_FUNC(port->slot << 3);
+	u32 slot = PCI_SLOT(port->slot << 3);
+	u32 val;
+	int err;
+
+	/* assert port PERST_N */
+	val = readl(pcie->base + PCIE_SYS_CFG);
+	val |= PCIE_PORT_PERST(port->slot);
+	writel(val, pcie->base + PCIE_SYS_CFG);
+
+	/* de-assert port PERST_N */
+	val = readl(pcie->base + PCIE_SYS_CFG);
+	val &= ~PCIE_PORT_PERST(port->slot);
+	writel(val, pcie->base + PCIE_SYS_CFG);
+
+	/* 100ms timeout value should be enough for Gen1/2 training */
+	err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val,
+				 !!(val & PCIE_PORT_LINKUP), 20,
+				 100 * USEC_PER_MSEC);
+	if (err)
+		return -ETIMEDOUT;
+
+	/* enable interrupt */
+	val = readl(pcie->base + PCIE_INT_ENABLE);
+	val |= PCIE_PORT_INT_EN(port->slot);
+	writel(val, pcie->base + PCIE_INT_ENABLE);
+
+	/* Map to all DDR regions; this must be set before any config access. */
+	writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+	       port->base + PCIE_BAR0_SETUP);
+
+	/* configure class code and revision ID */
+	writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
+
+	/* configure FC credit */
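+	/*
+	 * This is a read-modify-write through the indirect CFG_ADDR/CFG_DATA
+	 * pair; CFG_ADDR is written again before the data write so the pair
+	 * still targets PCIE_FC_CREDIT.
+	 */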
+	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	val = readl(pcie->base + PCIE_CFG_DATA);
+	val &= ~PCIE_FC_CREDIT_MASK;
+	val |= PCIE_FC_CREDIT_VAL(0x806c);
+	writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	writel(val, pcie->base + PCIE_CFG_DATA);
+
+	/* configure RC FTS number to 250 when it leaves L0s */
+	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	val = readl(pcie->base + PCIE_CFG_DATA);
+	val &= ~PCIE_FTS_NUM_MASK;
+	val |= PCIE_FTS_NUM_L0(0x50);
+	writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+	       pcie->base + PCIE_CFG_ADDR);
+	writel(val, pcie->base + PCIE_CFG_DATA);
+
+	return 0;
+}
+
+static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
+{
+	struct mtk_pcie *pcie = port->pcie;
+	struct device *dev = pcie->dev;
+	int err;
+
+	err = clk_prepare_enable(port->sys_ck);
+	if (err) {
+		dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot);
+		goto err_sys_clk;
+	}
+
+	err = clk_prepare_enable(port->ahb_ck);
+	if (err) {
+		dev_err(dev, "failed to enable ahb_ck%d\n", port->slot);
+		goto err_ahb_clk;
+	}
+
+	err = clk_prepare_enable(port->aux_ck);
+	if (err) {
+		dev_err(dev, "failed to enable aux_ck%d\n", port->slot);
+		goto err_aux_clk;
+	}
+
+	err = clk_prepare_enable(port->axi_ck);
+	if (err) {
+		dev_err(dev, "failed to enable axi_ck%d\n", port->slot);
+		goto err_axi_clk;
+	}
+
+	err = clk_prepare_enable(port->obff_ck);
+	if (err) {
+		dev_err(dev, "failed to enable obff_ck%d\n", port->slot);
+		goto err_obff_clk;
+	}
+
+	err = clk_prepare_enable(port->pipe_ck);
+	if (err) {
+		dev_err(dev, "failed to enable pipe_ck%d\n", port->slot);
+		goto err_pipe_clk;
+	}
+
+	reset_control_assert(port->reset);
+	reset_control_deassert(port->reset);
+
+	err = phy_init(port->phy);
+	if (err) {
+		dev_err(dev, "failed to initialize port%d phy\n", port->slot);
+		goto err_phy_init;
+	}
+
+	err = phy_power_on(port->phy);
+	if (err) {
+		dev_err(dev, "failed to power on port%d phy\n", port->slot);
+		goto err_phy_on;
+	}
+
+	if (!pcie->soc->startup(port))
+		return;
+
+	dev_info(dev, "Port%d link down\n", port->slot);
+
+	phy_power_off(port->phy);
+err_phy_on:
+	phy_exit(port->phy);
+err_phy_init:
+	clk_disable_unprepare(port->pipe_ck);
+err_pipe_clk:
+	clk_disable_unprepare(port->obff_ck);
+err_obff_clk:
+	clk_disable_unprepare(port->axi_ck);
+err_axi_clk:
+	clk_disable_unprepare(port->aux_ck);
+err_aux_clk:
+	clk_disable_unprepare(port->ahb_ck);
+err_ahb_clk:
+	clk_disable_unprepare(port->sys_ck);
+err_sys_clk:
+	mtk_pcie_port_free(port);
+}
+
+static int mtk_pcie_parse_port(struct mtk_pcie *pcie,
+			       struct device_node *node,
+			       int slot)
+{
+	struct mtk_pcie_port *port;
+	struct resource *regs;
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	char name[10];
+	int err;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	snprintf(name, sizeof(name), "port%d", slot);
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	port->base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(port->base)) {
+		dev_err(dev, "failed to map port%d base\n", slot);
+		return PTR_ERR(port->base);
+	}
+
+	snprintf(name, sizeof(name), "sys_ck%d", slot);
+	port->sys_ck = devm_clk_get(dev, name);
+	if (IS_ERR(port->sys_ck)) {
+		dev_err(dev, "failed to get sys_ck%d clock\n", slot);
+		return PTR_ERR(port->sys_ck);
+	}
+
+	/* sys_ck might be divided into the following parts in some chips */
+	snprintf(name, sizeof(name), "ahb_ck%d", slot);
+	port->ahb_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->ahb_ck))
+		return PTR_ERR(port->ahb_ck);
+
+	snprintf(name, sizeof(name), "axi_ck%d", slot);
+	port->axi_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->axi_ck))
+		return PTR_ERR(port->axi_ck);
+
+	snprintf(name, sizeof(name), "aux_ck%d", slot);
+	port->aux_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->aux_ck))
+		return PTR_ERR(port->aux_ck);
+
+	snprintf(name, sizeof(name), "obff_ck%d", slot);
+	port->obff_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->obff_ck))
+		return PTR_ERR(port->obff_ck);
+
+	snprintf(name, sizeof(name), "pipe_ck%d", slot);
+	port->pipe_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->pipe_ck))
+		return PTR_ERR(port->pipe_ck);
+
+	snprintf(name, sizeof(name), "pcie-rst%d", slot);
+	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
+	if (PTR_ERR(port->reset) == -EPROBE_DEFER)
+		return PTR_ERR(port->reset);
+
+	/* some platforms may use default PHY setting */
+	snprintf(name, sizeof(name), "pcie-phy%d", slot);
+	port->phy = devm_phy_optional_get(dev, name);
+	if (IS_ERR(port->phy))
+		return PTR_ERR(port->phy);
+
+	port->slot = slot;
+	port->pcie = pcie;
+
+	if (pcie->soc->setup_irq) {
+		err = pcie->soc->setup_irq(port, node);
+		if (err)
+			return err;
+	}
+
+	INIT_LIST_HEAD(&port->list);
+	list_add_tail(&port->list, &pcie->ports);
+
+	return 0;
+}
+
+static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *regs;
+	int err;
+
+	/* get shared registers, which are optional */
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys");
+	if (regs) {
+		pcie->base = devm_ioremap_resource(dev, regs);
+		if (IS_ERR(pcie->base)) {
+			dev_err(dev, "failed to map shared register\n");
+			return PTR_ERR(pcie->base);
+		}
+	}
+
+	pcie->free_ck = devm_clk_get(dev, "free_ck");
+	if (IS_ERR(pcie->free_ck)) {
+		if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		pcie->free_ck = NULL;
+	}
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	/* enable top level clock */
+	err = clk_prepare_enable(pcie->free_ck);
+	if (err) {
+		dev_err(dev, "failed to enable free_ck\n");
+		goto err_free_ck;
+	}
+
+	return 0;
+
+err_free_ck:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+
+	return err;
+}
+
+static int mtk_pcie_setup(struct mtk_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node, *child;
+	struct mtk_pcie_port *port, *tmp;
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+	struct resource_entry *win, *tmp_win;
+	resource_size_t io_base;
+	int err;
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    windows, &io_base);
+	if (err)
+		return err;
+
+	err = devm_request_pci_bus_resources(dev, windows);
+	if (err < 0)
+		return err;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry_safe(win, tmp_win, windows) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			err = devm_pci_remap_iospace(dev, win->res, io_base);
+			if (err) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 err, win->res);
+				resource_list_destroy_entry(win);
+			}
+			break;
+		case IORESOURCE_MEM:
+			memcpy(&pcie->mem, win->res, sizeof(*win->res));
+			pcie->mem.name = "non-prefetchable";
+			break;
+		case IORESOURCE_BUS:
+			pcie->busnr = win->res->start;
+			break;
+		}
+	}
+
+	for_each_available_child_of_node(node, child) {
+		int slot;
+
+		err = of_pci_get_devfn(child);
+		if (err < 0) {
+			dev_err(dev, "failed to parse devfn: %d\n", err);
+			goto error_put_node;
+		}
+
+		slot = PCI_SLOT(err);
+
+		err = mtk_pcie_parse_port(pcie, child, slot);
+		if (err)
+			goto error_put_node;
+	}
+
+	err = mtk_pcie_subsys_powerup(pcie);
+	if (err)
+		return err;
+
+	/* enable each port, and then check link status */
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		mtk_pcie_enable_port(port);
+
+	/* power down PCIe subsys if slots are all empty (link down) */
+	if (list_empty(&pcie->ports))
+		mtk_pcie_subsys_powerdown(pcie);
+
+	return 0;
+error_put_node:
+	of_node_put(child);
+	return err;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_pcie *pcie;
+	struct pci_host_bridge *host;
+	int err;
+
+	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!host)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(host);
+
+	pcie->dev = dev;
+	pcie->soc = of_device_get_match_data(dev);
+	platform_set_drvdata(pdev, pcie);
+	INIT_LIST_HEAD(&pcie->ports);
+
+	err = mtk_pcie_setup(pcie);
+	if (err)
+		return err;
+
+	host->busnr = pcie->busnr;
+	host->dev.parent = pcie->dev;
+	host->ops = pcie->soc->ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	host->sysdata = pcie;
+
+	err = pci_host_probe(host);
+	if (err)
+		goto put_resources;
+
+	return 0;
+
+put_resources:
+	if (!list_empty(&pcie->ports))
+		mtk_pcie_put_resources(pcie);
+
+	return err;
+}
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+
+	pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	mtk_pcie_free_resources(pcie);
+
+	mtk_pcie_irq_teardown(pcie);
+
+	mtk_pcie_put_resources(pcie);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		clk_disable_unprepare(port->pipe_ck);
+		clk_disable_unprepare(port->obff_ck);
+		clk_disable_unprepare(port->axi_ck);
+		clk_disable_unprepare(port->aux_ck);
+		clk_disable_unprepare(port->ahb_ck);
+		clk_disable_unprepare(port->sys_ck);
+		phy_power_off(port->phy);
+		phy_exit(port->phy);
+	}
+
+	clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port, *tmp;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	clk_prepare_enable(pcie->free_ck);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		mtk_pcie_enable_port(port);
+
+	/* In case the EP was removed while the system was suspended */
+	if (list_empty(&pcie->ports))
+		clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+				      mtk_pcie_resume_noirq)
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
+	.ops = &mtk_pcie_ops,
+	.startup = mtk_pcie_startup_port,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
+	.ops = &mtk_pcie_ops_v2,
+	.startup = mtk_pcie_startup_port_v2,
+	.setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
+	.need_fix_class_id = true,
+	.ops = &mtk_pcie_ops_v2,
+	.startup = mtk_pcie_startup_port_v2,
+	.setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
+	.need_fix_class_id = true,
+	.need_fix_device_id = true,
+	.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
+	.ops = &mtk_pcie_ops_v2,
+	.startup = mtk_pcie_startup_port_v2,
+	.setup_irq = mtk_pcie_setup_irq,
+};
+
+static const struct of_device_id mtk_pcie_ids[] = {
+	{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
+	{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
+	{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
+	{ .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 },
+	{ .compatible = "mediatek,mt7629-pcie", .data = &mtk_pcie_soc_mt7629 },
+	{},
+};
+
+static struct platform_driver mtk_pcie_driver = {
+	.probe = mtk_pcie_probe,
+	.remove = mtk_pcie_remove,
+	.driver = {
+		.name = "mtk-pcie",
+		.of_match_table = mtk_pcie_ids,
+		.suppress_bind_attrs = true,
+		.pm = &mtk_pcie_pm_ops,
+	},
+};
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-mobiveil.c b/marvell/linux/drivers/pci/controller/pcie-mobiveil.c
new file mode 100644
index 0000000..32f37d0
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-mobiveil.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Mobiveil PCIe Host controller
+ *
+ * Copyright (c) 2018 Mobiveil Inc.
+ * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+/* register offsets and bit positions */
+
+/*
+ * Translation tables are grouped into windows; each window's registers are
+ * grouped into blocks of 4 or 16 registers each.
+ */
+#define PAB_REG_BLOCK_SIZE		16
+#define PAB_EXT_REG_BLOCK_SIZE		4
+
+#define PAB_REG_ADDR(offset, win)	\
+	(offset + (win * PAB_REG_BLOCK_SIZE))
+#define PAB_EXT_REG_ADDR(offset, win)	\
+	(offset + (win * PAB_EXT_REG_BLOCK_SIZE))
+
+#define LTSSM_STATUS			0x0404
+#define  LTSSM_STATUS_L0_MASK		0x3f
+#define  LTSSM_STATUS_L0		0x2d
+
+#define PAB_CTRL			0x0808
+#define  AMBA_PIO_ENABLE_SHIFT		0
+#define  PEX_PIO_ENABLE_SHIFT		1
+#define  PAGE_SEL_SHIFT			13
+#define  PAGE_SEL_MASK			0x3f
+#define  PAGE_LO_MASK			0x3ff
+#define  PAGE_SEL_OFFSET_SHIFT		10
+
+#define PAB_AXI_PIO_CTRL		0x0840
+#define  APIO_EN_MASK			0xf
+
+#define PAB_PEX_PIO_CTRL		0x08c0
+#define  PIO_ENABLE_SHIFT		0
+
+#define PAB_INTP_AMBA_MISC_ENB		0x0b0c
+#define PAB_INTP_AMBA_MISC_STAT		0x0b1c
+#define  PAB_INTP_INTX_MASK		0x01e0
+#define  PAB_INTP_MSI_MASK		0x8
+
+#define PAB_AXI_AMAP_CTRL(win)		PAB_REG_ADDR(0x0ba0, win)
+#define  WIN_ENABLE_SHIFT		0
+#define  WIN_TYPE_SHIFT			1
+#define  WIN_TYPE_MASK			0x3
+#define  WIN_SIZE_MASK			0xfffffc00
+
+#define PAB_EXT_AXI_AMAP_SIZE(win)	PAB_EXT_REG_ADDR(0xbaf0, win)
+
+#define PAB_EXT_AXI_AMAP_AXI_WIN(win)	PAB_EXT_REG_ADDR(0x80a0, win)
+#define PAB_AXI_AMAP_AXI_WIN(win)	PAB_REG_ADDR(0x0ba4, win)
+#define  AXI_WINDOW_ALIGN_MASK		3
+
+#define PAB_AXI_AMAP_PEX_WIN_L(win)	PAB_REG_ADDR(0x0ba8, win)
+#define  PAB_BUS_SHIFT			24
+#define  PAB_DEVICE_SHIFT		19
+#define  PAB_FUNCTION_SHIFT		16
+
+#define PAB_AXI_AMAP_PEX_WIN_H(win)	PAB_REG_ADDR(0x0bac, win)
+#define PAB_INTP_AXI_PIO_CLASS		0x474
+
+#define PAB_PEX_AMAP_CTRL(win)		PAB_REG_ADDR(0x4ba0, win)
+#define  AMAP_CTRL_EN_SHIFT		0
+#define  AMAP_CTRL_TYPE_SHIFT		1
+#define  AMAP_CTRL_TYPE_MASK		3
+
+#define PAB_EXT_PEX_AMAP_SIZEN(win)	PAB_EXT_REG_ADDR(0xbef0, win)
+#define PAB_EXT_PEX_AMAP_AXI_WIN(win)	PAB_EXT_REG_ADDR(0xb4a0, win)
+#define PAB_PEX_AMAP_AXI_WIN(win)	PAB_REG_ADDR(0x4ba4, win)
+#define PAB_PEX_AMAP_PEX_WIN_L(win)	PAB_REG_ADDR(0x4ba8, win)
+#define PAB_PEX_AMAP_PEX_WIN_H(win)	PAB_REG_ADDR(0x4bac, win)
+
+/* starting offset of INTX bits in status register */
+#define PAB_INTX_START			5
+
+/* supported number of MSI interrupts */
+#define PCI_NUM_MSI			16
+
+/* MSI registers */
+#define MSI_BASE_LO_OFFSET		0x04
+#define MSI_BASE_HI_OFFSET		0x08
+#define MSI_SIZE_OFFSET			0x0c
+#define MSI_ENABLE_OFFSET		0x14
+#define MSI_STATUS_OFFSET		0x18
+#define MSI_DATA_OFFSET			0x20
+#define MSI_ADDR_L_OFFSET		0x24
+#define MSI_ADDR_H_OFFSET		0x28
+
+/* outbound and inbound window definitions */
+#define WIN_NUM_0			0
+#define WIN_NUM_1			1
+#define CFG_WINDOW_TYPE			0
+#define IO_WINDOW_TYPE			1
+#define MEM_WINDOW_TYPE			2
+#define IB_WIN_SIZE			((u64)256 * 1024 * 1024 * 1024)
+#define MAX_PIO_WINDOWS			8
+
+/* Parameters for waiting for the link to come up */
+#define LINK_WAIT_MAX_RETRIES		10
+#define LINK_WAIT_MIN			90000
+#define LINK_WAIT_MAX			100000
+
+#define PAGED_ADDR_BNDRY		0xc00
+#define OFFSET_TO_PAGE_ADDR(off)	\
+	((off & PAGE_LO_MASK) | PAGED_ADDR_BNDRY)
+#define OFFSET_TO_PAGE_IDX(off)		\
+	((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK)
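+
+/*
+ * Example (illustrative): off = 0xbaf0 lies above PAGED_ADDR_BNDRY, so
+ * OFFSET_TO_PAGE_IDX(0xbaf0) = 0x2e selects the page while
+ * OFFSET_TO_PAGE_ADDR(0xbaf0) = 0xef0 is the in-page offset actually used
+ * for the access.
+ */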
+
+struct mobiveil_msi {			/* MSI information */
+	struct mutex lock;		/* protect bitmap variable */
+	struct irq_domain *msi_domain;
+	struct irq_domain *dev_domain;
+	phys_addr_t msi_pages_phys;
+	int num_of_vectors;
+	DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
+};
+
+struct mobiveil_pcie {
+	struct platform_device *pdev;
+	struct list_head resources;
+	void __iomem *config_axi_slave_base;	/* endpoint config base */
+	void __iomem *csr_axi_slave_base;	/* root port config base */
+	void __iomem *apb_csr_base;	/* MSI register base */
+	phys_addr_t pcie_reg_base;	/* Physical PCIe Controller Base */
+	struct irq_domain *intx_domain;
+	raw_spinlock_t intx_mask_lock;
+	int irq;
+	int apio_wins;
+	int ppio_wins;
+	int ob_wins_configured;		/* configured outbound windows */
+	int ib_wins_configured;		/* configured inbound windows */
+	struct resource *ob_io_res;
+	char root_bus_nr;
+	struct mobiveil_msi msi;
+};
+
+/*
+ * mobiveil_pcie_sel_page - routine to access paged registers
+ *
+ * Registers whose address is greater than PAGED_ADDR_BNDRY (0xc00) are
+ * paged. For this scheme to work, the upper 6 bits of the offset are written
+ * to the pg_sel field of the PAB_CTRL register, and the lower 10 bits, ORed
+ * with PAGED_ADDR_BNDRY, are used as the offset of the register.
+ */
+static void mobiveil_pcie_sel_page(struct mobiveil_pcie *pcie, u8 pg_idx)
+{
+	u32 val;
+
+	val = readl(pcie->csr_axi_slave_base + PAB_CTRL);
+	val &= ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT);
+	val |= (pg_idx & PAGE_SEL_MASK) << PAGE_SEL_SHIFT;
+
+	writel(val, pcie->csr_axi_slave_base + PAB_CTRL);
+}
+
+static void *mobiveil_pcie_comp_addr(struct mobiveil_pcie *pcie, u32 off)
+{
+	if (off < PAGED_ADDR_BNDRY) {
+		/* For directly accessed registers, clear the pg_sel field */
+		mobiveil_pcie_sel_page(pcie, 0);
+		return pcie->csr_axi_slave_base + off;
+	}
+
+	mobiveil_pcie_sel_page(pcie, OFFSET_TO_PAGE_IDX(off));
+	return pcie->csr_axi_slave_base + OFFSET_TO_PAGE_ADDR(off);
+}
+
+static int mobiveil_pcie_read(void __iomem *addr, int size, u32 *val)
+{
+	if ((uintptr_t)addr & (size - 1)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	switch (size) {
+	case 4:
+		*val = readl(addr);
+		break;
+	case 2:
+		*val = readw(addr);
+		break;
+	case 1:
+		*val = readb(addr);
+		break;
+	default:
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
+{
+	if ((uintptr_t)addr & (size - 1))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	switch (size) {
+	case 4:
+		writel(val, addr);
+		break;
+	case 2:
+		writew(val, addr);
+		break;
+	case 1:
+		writeb(val, addr);
+		break;
+	default:
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+{
+	void *addr;
+	u32 val;
+	int ret;
+
+	addr = mobiveil_pcie_comp_addr(pcie, off);
+
+	ret = mobiveil_pcie_read(addr, size, &val);
+	if (ret)
+		dev_err(&pcie->pdev->dev, "read CSR address failed\n");
+
+	return val;
+}
+
+static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+			       size_t size)
+{
+	void *addr;
+	int ret;
+
+	addr = mobiveil_pcie_comp_addr(pcie, off);
+
+	ret = mobiveil_pcie_write(addr, size, val);
+	if (ret)
+		dev_err(&pcie->pdev->dev, "write CSR address failed\n");
+}
+
+static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
+{
+	return mobiveil_csr_read(pcie, off, 0x4);
+}
+
+static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+{
+	mobiveil_csr_write(pcie, val, off, 0x4);
+}
+
+static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
+{
+	return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
+		LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
+}
+
+static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct mobiveil_pcie *pcie = bus->sysdata;
+
+	/* Only one device down on each root port */
+	if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
+		return false;
+
+	/*
+	 * Do not read more than one device on the bus directly
+	 * attached to RC
+	 */
+	if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
+		return false;
+
+	return true;
+}
+
+/*
+ * mobiveil_pcie_map_bus - routine to get the configuration base of either
+ * root port or endpoint
+ */
+static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
+					   unsigned int devfn, int where)
+{
+	struct mobiveil_pcie *pcie = bus->sysdata;
+	u32 value;
+
+	if (!mobiveil_pcie_valid_device(bus, devfn))
+		return NULL;
+
+	/* RC config access */
+	if (bus->number == pcie->root_bus_nr)
+		return pcie->csr_axi_slave_base + where;
+
+	/*
+	 * EP config access (in Config/APIO space)
+	 * Program PEX Address base (31..16 bits) with appropriate value
+	 * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
+	 * Relies on pci_lock serialization
+	 */
+	value = bus->number << PAB_BUS_SHIFT |
+		PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
+		PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
+
+	mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+
+	return pcie->config_axi_slave_base + where;
+}
+
+static struct pci_ops mobiveil_pcie_ops = {
+	.map_bus = mobiveil_pcie_map_bus,
+	.read = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
+
+static void mobiveil_pcie_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
+	struct device *dev = &pcie->pdev->dev;
+	struct mobiveil_msi *msi = &pcie->msi;
+	u32 msi_data, msi_addr_lo, msi_addr_hi;
+	u32 intr_status, msi_status;
+	unsigned long shifted_status;
+	u32 bit, virq, val, mask;
+
+	/*
+	 * The core provides a single interrupt for both INTx and MSI
+	 * messages, so we read both the INTx and the MSI status registers.
+	 */
+
+	chained_irq_enter(chip, desc);
+
+	/* read INTx status */
+	val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+	mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+	intr_status = val & mask;
+
+	/* Handle INTx */
+	if (intr_status & PAB_INTP_INTX_MASK) {
+		shifted_status = mobiveil_csr_readl(pcie,
+						    PAB_INTP_AMBA_MISC_STAT);
+		shifted_status &= PAB_INTP_INTX_MASK;
+		shifted_status >>= PAB_INTX_START;
+		do {
+			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
+				virq = irq_find_mapping(pcie->intx_domain,
+							bit + 1);
+				if (virq)
+					generic_handle_irq(virq);
+				else
+					dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
+							    bit);
+
+				/* clear interrupt handled */
+				mobiveil_csr_writel(pcie,
+						    1 << (PAB_INTX_START + bit),
+						    PAB_INTP_AMBA_MISC_STAT);
+			}
+
+			shifted_status = mobiveil_csr_readl(pcie,
+							    PAB_INTP_AMBA_MISC_STAT);
+			shifted_status &= PAB_INTP_INTX_MASK;
+			shifted_status >>= PAB_INTX_START;
+		} while (shifted_status != 0);
+	}
+
+	/* read extra MSI status register */
+	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);
+
+	/* handle MSI interrupts */
+	while (msi_status & 1) {
+		msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
+
+		/*
+		 * The MSI_STATUS_OFFSET register is only cleared once both
+		 * the MSI data and the address have been popped from the MSI
+		 * hardware FIFO, hence the following two dummy reads.
+		 */
+		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
+					    MSI_ADDR_L_OFFSET);
+		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
+					    MSI_ADDR_H_OFFSET);
+		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
+			msi_data, msi_addr_hi, msi_addr_lo);
+
+		virq = irq_find_mapping(msi->dev_domain, msi_data);
+		if (virq)
+			generic_handle_irq(virq);
+
+		msi_status = readl_relaxed(pcie->apb_csr_base +
+					   MSI_STATUS_OFFSET);
+	}
+
+	/* Clear the interrupt status */
+	mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+	chained_irq_exit(chip, desc);
+}
+
+static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct platform_device *pdev = pcie->pdev;
+	struct device_node *node = dev->of_node;
+	struct resource *res;
+
+	/* map config resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "config_axi_slave");
+	pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->config_axi_slave_base))
+		return PTR_ERR(pcie->config_axi_slave_base);
+	pcie->ob_io_res = res;
+
+	/* map csr resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "csr_axi_slave");
+	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->csr_axi_slave_base))
+		return PTR_ERR(pcie->csr_axi_slave_base);
+	pcie->pcie_reg_base = res->start;
+
+	/* map MSI config resource */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
+	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->apb_csr_base))
+		return PTR_ERR(pcie->apb_csr_base);
+
+	/* read the number of windows requested */
+	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
+		pcie->apio_wins = MAX_PIO_WINDOWS;
+
+	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
+		pcie->ppio_wins = MAX_PIO_WINDOWS;
+
+	pcie->irq = platform_get_irq(pdev, 0);
+	if (pcie->irq <= 0) {
+		dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+			       u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+	u32 value;
+	u64 size64 = ~(size - 1);
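+	/*
+	 * For a power-of-two size, ~(size - 1) is a mask whose low
+	 * log2(size) bits are clear, e.g. a 64 KB window yields 0xffff0000
+	 * in the lower DW; program_ob_windows() uses the same encoding.
+	 */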
+
+	if (win_num >= pcie->ppio_wins) {
+		dev_err(&pcie->pdev->dev,
+			"ERROR: max inbound windows reached !\n");
+		return;
+	}
+
+	value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+	value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
+	value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
+		 (lower_32_bits(size64) & WIN_SIZE_MASK);
+	mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+
+	mobiveil_csr_writel(pcie, upper_32_bits(size64),
+			    PAB_EXT_PEX_AMAP_SIZEN(win_num));
+
+	mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+			    PAB_PEX_AMAP_AXI_WIN(win_num));
+	mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+			    PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+
+	mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+			    PAB_PEX_AMAP_PEX_WIN_L(win_num));
+	mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+			    PAB_PEX_AMAP_PEX_WIN_H(win_num));
+
+	pcie->ib_wins_configured++;
+}
+
+/*
+ * routine to program the outbound windows
+ */
+static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+			       u64 cpu_addr, u64 pci_addr, u32 type, u64 size)
+{
+	u32 value;
+	u64 size64 = ~(size - 1);
+
+	if (win_num >= pcie->apio_wins) {
+		dev_err(&pcie->pdev->dev,
+			"ERROR: max outbound windows reached !\n");
+		return;
+	}
+
+	/*
+	 * Program the Enable bit, the window Type field and the lower bits
+	 * of the window size in the PAB_AXI_AMAP_CTRL register.
+	 */
+	value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+	value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
+	value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
+		 (lower_32_bits(size64) & WIN_SIZE_MASK);
+	mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+
+	mobiveil_csr_writel(pcie, upper_32_bits(size64),
+			    PAB_EXT_AXI_AMAP_SIZE(win_num));
+
+	/*
+	 * program AXI window base with appropriate value in
+	 * PAB_AXI_AMAP_AXI_WIN0 register
+	 */
+	mobiveil_csr_writel(pcie,
+			    lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+			    PAB_AXI_AMAP_AXI_WIN(win_num));
+	mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+			    PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+
+	mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+			    PAB_AXI_AMAP_PEX_WIN_L(win_num));
+	mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+			    PAB_AXI_AMAP_PEX_WIN_H(win_num));
+
+	pcie->ob_wins_configured++;
+}
+
+static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
+{
+	int retries;
+
+	/* check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (mobiveil_pcie_link_up(pcie))
+			return 0;
+
+		usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+	}
+
+	dev_err(&pcie->pdev->dev, "link never came up\n");
+
+	return -ETIMEDOUT;
+}
+
+static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
+{
+	phys_addr_t msg_addr = pcie->pcie_reg_base;
+	struct mobiveil_msi *msi = &pcie->msi;
+
+	pcie->msi.num_of_vectors = PCI_NUM_MSI;
+	msi->msi_pages_phys = msg_addr;
+
+	writel_relaxed(lower_32_bits(msg_addr),
+		       pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
+	writel_relaxed(upper_32_bits(msg_addr),
+		       pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
+	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
+	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
+}
+
+static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+{
+	u32 value, pab_ctrl, type;
+	struct resource_entry *win;
+
+	/* setup bus numbers */
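+	/*
+	 * Bits [7:0] are the primary bus (0x00), [15:8] the secondary bus
+	 * (0x01) and [23:16] the subordinate bus (0xff); the top byte, the
+	 * secondary latency timer, is preserved.
+	 */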
+	value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
+	value &= 0xff000000;
+	value |= 0x00ff0100;
+	mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
+
+	/*
+	 * program Bus Master Enable Bit in Command Register in PAB Config
+	 * Space
+	 */
+	value = mobiveil_csr_readl(pcie, PCI_COMMAND);
+	value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+	mobiveil_csr_writel(pcie, value, PCI_COMMAND);
+
+	/*
+	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
+	 * register
+	 */
+	pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
+	pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
+	mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
+
+	mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+			    PAB_INTP_AMBA_MISC_ENB);
+
+	/*
+	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
+	 * PAB_AXI_PIO_CTRL Register
+	 */
+	value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
+	value |= APIO_EN_MASK;
+	mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+
+	/* Enable PCIe PIO master */
+	value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
+	value |= 1 << PIO_ENABLE_SHIFT;
+	mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+
+	/*
+	 * We'll program one outbound window for config reads and another
+	 * default inbound window for all the upstream traffic; the rest of
+	 * the outbound windows will be configured according to the "ranges"
+	 * property defined in the device tree.
+	 */
+
+	/* config outbound translation window */
+	program_ob_windows(pcie, WIN_NUM_0, pcie->ob_io_res->start, 0,
+			   CFG_WINDOW_TYPE, resource_size(pcie->ob_io_res));
+
+	/* memory inbound translation window */
+	program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry(win, &pcie->resources) {
+		if (resource_type(win->res) == IORESOURCE_MEM)
+			type = MEM_WINDOW_TYPE;
+		else if (resource_type(win->res) == IORESOURCE_IO)
+			type = IO_WINDOW_TYPE;
+		else
+			continue;
+
+		/* configure outbound translation window */
+		program_ob_windows(pcie, pcie->ob_wins_configured,
+				   win->res->start,
+				   win->res->start - win->offset,
+				   type, resource_size(win->res));
+	}
+
+	/* fixup for PCIe class register */
+	value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+	value &= 0xff;
+	value |= (PCI_CLASS_BRIDGE_PCI << 16);
+	mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
+	/* setup MSI hardware registers */
+	mobiveil_pcie_enable_msi(pcie);
+
+	return 0;
+}
+
+static void mobiveil_mask_intx_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct mobiveil_pcie *pcie;
+	unsigned long flags;
+	u32 mask, shifted_val;
+
+	pcie = irq_desc_get_chip_data(desc);
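+	/* hwirq is 1-based (mobiveil_pcie_isr maps bit + 1), hence the -1 */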
+	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+	raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+	shifted_val &= ~mask;
+	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+	raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static void mobiveil_unmask_intx_irq(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_to_desc(data->irq);
+	struct mobiveil_pcie *pcie;
+	unsigned long flags;
+	u32 shifted_val, mask;
+
+	pcie = irq_desc_get_chip_data(desc);
+	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
+	raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
+	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+	shifted_val |= mask;
+	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+	raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
+}
+
+static struct irq_chip intx_irq_chip = {
+	.name = "mobiveil_pcie:intx",
+	.irq_enable = mobiveil_unmask_intx_irq,
+	.irq_disable = mobiveil_mask_intx_irq,
+	.irq_mask = mobiveil_mask_intx_irq,
+	.irq_unmask = mobiveil_unmask_intx_irq,
+};
+
+/* routine to setup the INTx related data */
+static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				  irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+/* INTx domain operations structure */
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = mobiveil_pcie_intx_map,
+};
+
+static struct irq_chip mobiveil_msi_irq_chip = {
+	.name = "Mobiveil PCIe MSI",
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info mobiveil_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX),
+	.chip	= &mobiveil_msi_irq_chip,
+};
+
+static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
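+	/*
+	 * Each vector gets its own doorbell address, hwirq * sizeof(int)
+	 * above pcie_reg_base; the vector number itself travels in the MSI
+	 * data.
+	 */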
+	phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));
+
+	msg->address_lo = lower_32_bits(addr);
+	msg->address_hi = upper_32_bits(addr);
+	msg->data = data->hwirq;
+
+	dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
+		(int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
+				     const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip mobiveil_msi_bottom_irq_chip = {
+	.name			= "Mobiveil MSI",
+	.irq_compose_msi_msg	= mobiveil_compose_msi_msg,
+	.irq_set_affinity	= mobiveil_msi_set_affinity,
+};
+
+static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
+					 unsigned int virq,
+					 unsigned int nr_irqs, void *args)
+{
+	struct mobiveil_pcie *pcie = domain->host_data;
+	struct mobiveil_msi *msi = &pcie->msi;
+	unsigned long bit;
+
+	WARN_ON(nr_irqs != 1);
+	mutex_lock(&msi->lock);
+
+	bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
+	if (bit >= msi->num_of_vectors) {
+		mutex_unlock(&msi->lock);
+		return -ENOSPC;
+	}
+
+	set_bit(bit, msi->msi_irq_in_use);
+
+	mutex_unlock(&msi->lock);
+
+	irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
+			    domain->host_data, handle_level_irq, NULL, NULL);
+	return 0;
+}
+
+static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
+					 unsigned int virq,
+					 unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
+	struct mobiveil_msi *msi = &pcie->msi;
+
+	mutex_lock(&msi->lock);
+
+	if (!test_bit(d->hwirq, msi->msi_irq_in_use))
+		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
+			d->hwirq);
+	else
+		__clear_bit(d->hwirq, msi->msi_irq_in_use);
+
+	mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc	= mobiveil_irq_msi_domain_alloc,
+	.free	= mobiveil_irq_msi_domain_free,
+};
+
+static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+	struct mobiveil_msi *msi = &pcie->msi;
+
+	mutex_init(&pcie->msi.lock);
+	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+						&msi_domain_ops, pcie);
+	if (!msi->dev_domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+						    &mobiveil_msi_domain_info,
+						    msi->dev_domain);
+	if (!msi->msi_domain) {
+		dev_err(dev, "failed to create MSI domain\n");
+		irq_domain_remove(msi->dev_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
+{
+	struct device *dev = &pcie->pdev->dev;
+	struct device_node *node = dev->of_node;
+	int ret;
+
+	/* setup INTx */
+	pcie->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+						  &intx_domain_ops, pcie);
+
+	if (!pcie->intx_domain) {
+		dev_err(dev, "Failed to get a INTx IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	raw_spin_lock_init(&pcie->intx_mask_lock);
+
+	/* setup MSI */
+	ret = mobiveil_allocate_msi_domains(pcie);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int mobiveil_pcie_probe(struct platform_device *pdev)
+{
+	struct mobiveil_pcie *pcie;
+	struct pci_bus *bus;
+	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
+	struct device *dev = &pdev->dev;
+	resource_size_t iobase;
+	int ret;
+
+	/* allocate the PCIe port */
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+
+	pcie->pdev = pdev;
+
+	ret = mobiveil_pcie_parse_dt(pcie);
+	if (ret) {
+		dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&pcie->resources);
+
+	/* parse the host bridge base addresses from the device tree file */
+	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &pcie->resources, &iobase);
+	if (ret) {
+		dev_err(dev, "Getting bridge resources failed\n");
+		return ret;
+	}
+
+	/*
+	 * configure all inbound and outbound windows and prepare the RC for
+	 * config access
+	 */
+	ret = mobiveil_host_init(pcie);
+	if (ret) {
+		dev_err(dev, "Failed to initialize host\n");
+		goto error;
+	}
+
+	/* initialize the IRQ domains */
+	ret = mobiveil_pcie_init_irq_domain(pcie);
+	if (ret) {
+		dev_err(dev, "Failed creating IRQ Domain\n");
+		goto error;
+	}
+
+	irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
+
+	ret = devm_request_pci_bus_resources(dev, &pcie->resources);
+	if (ret)
+		goto error;
+
+	/* Initialize bridge */
+	list_splice_init(&pcie->resources, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_bus_nr;
+	bridge->ops = &mobiveil_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	ret = mobiveil_bringup_link(pcie);
+	if (ret) {
+		dev_info(dev, "link bring-up failed\n");
+		goto error;
+	}
+
+	/* setup the kernel resources for the newly added PCIe root bus */
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret)
+		goto error;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bus);
+
+	return 0;
+error:
+	pci_free_resource_list(&pcie->resources);
+	return ret;
+}
+
+static const struct of_device_id mobiveil_pcie_of_match[] = {
+	{.compatible = "mbvl,gpex40-pcie",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);
+
+static struct platform_driver mobiveil_pcie_driver = {
+	.probe = mobiveil_pcie_probe,
+	.driver = {
+		.name = "mobiveil-pcie",
+		.of_match_table = mobiveil_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+builtin_platform_driver(mobiveil_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
+MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/marvell/linux/drivers/pci/controller/pcie-rcar.c b/marvell/linux/drivers/pci/controller/pcie-rcar.c
new file mode 100644
index 0000000..0411435
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-rcar.c
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ *  Copyright (C) 2014 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ *  arch/sh/drivers/pci/pcie-sh7786.c
+ *  arch/sh/drivers/pci/ops-sh7786.c
+ *  Copyright (C) 2009 - 2011  Paul Mundt
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+
+#define PCIECAR			0x000010
+#define PCIECCTLR		0x000018
+#define  CONFIG_SEND_ENABLE	BIT(31)
+#define  TYPE0			(0 << 8)
+#define  TYPE1			BIT(8)
+#define PCIECDR			0x000020
+#define PCIEMSR			0x000028
+#define PCIEINTXR		0x000400
+#define PCIEPHYSR		0x0007f0
+#define  PHYRDY			BIT(0)
+#define PCIEMSITXR		0x000840
+
+/* Transfer control */
+#define PCIETCTLR		0x02000
+#define  DL_DOWN		BIT(3)
+#define  CFINIT			BIT(0)
+#define PCIETSTR		0x02004
+#define  DATA_LINK_ACTIVE	BIT(0)
+#define PCIEERRFR		0x02020
+#define  UNSUPPORTED_REQUEST	BIT(4)
+#define PCIEMSIFR		0x02044
+#define PCIEMSIALR		0x02048
+#define  MSIFE			BIT(0)
+#define PCIEMSIAUR		0x0204c
+#define PCIEMSIIER		0x02050
+
+/* root port address */
+#define PCIEPRAR(x)		(0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x)		(0x02200 + ((x) * 0x20))
+#define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
+#define  LAM_PREFETCH		BIT(3)
+#define  LAM_64BIT		BIT(2)
+#define  LAR_ENABLE		BIT(1)
+
+/* PCIe address reg & mask */
+#define PCIEPALR(x)		(0x03400 + ((x) * 0x20))
+#define PCIEPAUR(x)		(0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
+#define  PAR_ENABLE		BIT(31)
+#define  IO_SPACE		BIT(8)
+
+/* Configuration */
+#define PCICONF(x)		(0x010000 + ((x) * 0x4))
+#define PMCAP(x)		(0x010040 + ((x) * 0x4))
+#define EXPCAP(x)		(0x010070 + ((x) * 0x4))
+#define VCCAP(x)		(0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR1			0x011004
+#define TLCTLR			0x011048
+#define MACSR			0x011054
+#define  SPCHGFIN		BIT(4)
+#define  SPCHGFAIL		BIT(6)
+#define  SPCHGSUC		BIT(7)
+#define  LINK_SPEED		(0xf << 16)
+#define  LINK_SPEED_2_5GTS	(1 << 16)
+#define  LINK_SPEED_5_0GTS	(2 << 16)
+#define MACCTLR			0x011058
+#define  MACCTLR_NFTS_MASK	GENMASK(23, 16)	/* The name is from SH7786 */
+#define  SPEED_CHANGE		BIT(24)
+#define  SCRAMBLE_DISABLE	BIT(27)
+#define  LTSMDIS		BIT(31)
+#define  MACCTLR_INIT_VAL	(LTSMDIS | MACCTLR_NFTS_MASK)
+#define PMSR			0x01105c
+#define MACS2R			0x011078
+#define MACCGSPSETR		0x011084
+#define  SPCNGRSN		BIT(31)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR		0x04000c
+#define  WRITE_CMD		BIT(16)
+#define  PHY_ACK		BIT(24)
+#define  RATE_POS		12
+#define  LANE_POS		8
+#define  ADR_POS		0
+#define H1_PCIEPHYDOUTR		0x040014
+
+/* R-Car Gen2 PHY */
+#define GEN2_PCIEPHYADDR	0x780
+#define GEN2_PCIEPHYDATA	0x784
+#define GEN2_PCIEPHYCTRL	0x78c
+
+#define INT_PCI_MSI_NR		32
+
+#define RCONF(x)		(PCICONF(0) + (x))
+#define RPMCAP(x)		(PMCAP(0) + (x))
+#define REXPCAP(x)		(EXPCAP(0) + (x))
+#define RVCCAP(x)		(VCCAP(0) + (x))
+
+#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)
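+/*
+ * PCIECAR address layout: bus[31:24] | device[23:19] | function[18:16],
+ * with the dword-aligned register offset OR'd into the low bits.
+ */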
+
+#define RCAR_PCI_MAX_RESOURCES	4
+#define MAX_NR_INBOUND_MAPS	6
+
+struct rcar_msi {
+	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+	struct irq_domain *domain;
+	struct msi_controller chip;
+	unsigned long pages;
+	struct mutex lock;
+	int irq1;
+	int irq2;
+};
+
+static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
+{
+	return container_of(chip, struct rcar_msi, chip);
+}
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie {
+	struct device		*dev;
+	struct phy		*phy;
+	void __iomem		*base;
+	struct list_head	resources;
+	int			root_bus_nr;
+	struct clk		*bus_clk;
+	struct			rcar_msi msi;
+};
+
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val,
+			       unsigned int reg)
+{
+	writel(val, pcie->base + reg);
+}
+
+static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
+{
+	return readl(pcie->base + reg);
+}
+
+enum {
+	RCAR_PCI_ACCESS_READ,
+	RCAR_PCI_ACCESS_WRITE,
+};
+
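+/*
+ * Read-modify-write a field inside the 32-bit register containing byte
+ * offset @where: @mask and @data are shifted to the byte lane selected
+ * by the low two bits of @where.
+ */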
+static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+{
+	unsigned int shift = BITS_PER_BYTE * (where & 3);
+	u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+	val &= ~(mask << shift);
+	val |= data << shift;
+	rcar_pci_write_reg(pcie, val, where & ~3);
+}
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+	unsigned int shift = BITS_PER_BYTE * (where & 3);
+	u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+	return val >> shift;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie *pcie,
+		unsigned char access_type, struct pci_bus *bus,
+		unsigned int devfn, int where, u32 *data)
+{
+	unsigned int dev, func, reg, index;
+
+	dev = PCI_SLOT(devfn);
+	func = PCI_FUNC(devfn);
+	reg = where & ~3;
+	index = reg / 4;
+
+	/*
+	 * While each channel has its own memory-mapped extended config
+	 * space, it's generally only accessible when in endpoint mode.
+	 * When in root complex mode, the controller is unable to target
+	 * itself with either type 0 or type 1 accesses, and indeed, any
+	 * controller initiated target transfer to its own config space
+	 * result in a completer abort.
+	 *
+	 * Each channel effectively only supports a single device, but as
+	 * the same channel <-> device access works for any PCI_SLOT()
+	 * value, we cheat a bit here and bind the controller's config
+	 * space to devfn 0 in order to enable self-enumeration. In this
+	 * case the regular ECAR/ECDR path is sidelined and the mangled
+	 * config access itself is initiated as an internal bus transaction.
+	 */
+	if (pci_is_root_bus(bus)) {
+		if (dev != 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+
+		if (access_type == RCAR_PCI_ACCESS_READ) {
+			*data = rcar_pci_read_reg(pcie, PCICONF(index));
+		} else {
+			/* Keep an eye out for changes to the root bus number */
+			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
+				pcie->root_bus_nr = *data & 0xff;
+
+			rcar_pci_write_reg(pcie, *data, PCICONF(index));
+		}
+
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	if (pcie->root_bus_nr < 0)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Clear errors */
+	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+	/* Set the PIO address */
+	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+	/* Enable the configuration access */
+	if (bus->parent->number == pcie->root_bus_nr)
+		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+	else
+		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+
+	/* Check for errors */
+	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Check for master and target aborts */
+	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (access_type == RCAR_PCI_ACCESS_READ)
+		*data = rcar_pci_read_reg(pcie, PCIECDR);
+	else
+		rcar_pci_write_reg(pcie, *data, PCIECDR);
+
+	/* Disable the configuration access */
+	rcar_pci_write_reg(pcie, 0, PCIECCTLR);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+			       int where, int size, u32 *val)
+{
+	struct rcar_pcie *pcie = bus->sysdata;
+	int ret;
+
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
+				      bus, devfn, where, val);
+	if (ret != PCIBIOS_SUCCESSFUL) {
+		*val = 0xffffffff;
+		return ret;
+	}
+
+	if (size == 1)
+		*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
+	else if (size == 2)
+		*val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
+
+	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+		bus->number, devfn, where, size, *val);
+
+	return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 val)
+{
+	struct rcar_pcie *pcie = bus->sysdata;
+	unsigned int shift;
+	u32 data;
+	int ret;
+
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
+				      bus, devfn, where, &data);
+	if (ret != PCIBIOS_SUCCESSFUL)
+		return ret;
+
+	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+		bus->number, devfn, where, size, val);
+
+	if (size == 1) {
+		shift = BITS_PER_BYTE * (where & 3);
+		data &= ~(0xff << shift);
+		data |= ((val & 0xff) << shift);
+	} else if (size == 2) {
+		shift = BITS_PER_BYTE * (where & 2);
+		data &= ~(0xffff << shift);
+		data |= ((val & 0xffff) << shift);
+	} else
+		data = val;
+
+	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
+				      bus, devfn, where, &data);
+
+	return ret;
+}
+
+static struct pci_ops rcar_pcie_ops = {
+	.read	= rcar_pcie_read_conf,
+	.write	= rcar_pcie_write_conf,
+};
+
+static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
+				   struct resource_entry *window)
+{
+	/* Setup PCIe address space mappings for each resource */
+	resource_size_t size;
+	resource_size_t res_start;
+	struct resource *res = window->res;
+	u32 mask;
+
+	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+
+	/*
+	 * The PAMR mask is calculated in units of 128 bytes, which
+	 * keeps things pretty simple.
+	 */
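+	/*
+	 * e.g. a 1 MiB window: mask = (SZ_1M / SZ_128) - 1 = 0x1fff,
+	 * written into the PAMR mask field at bits [31:7].
+	 */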
+	size = resource_size(res);
+	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+
+	if (res->flags & IORESOURCE_IO)
+		res_start = pci_pio_to_address(res->start) - window->offset;
+	else
+		res_start = res->start - window->offset;
+
+	rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
+	rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
+			   PCIEPALR(win));
+
+	/* First resource is for IO */
+	mask = PAR_ENABLE;
+	if (res->flags & IORESOURCE_IO)
+		mask |= IO_SPACE;
+
+	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+}
+
+static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
+{
+	struct resource_entry *win;
+	int i = 0;
+
+	/* Setup PCI resources */
+	resource_list_for_each_entry(win, &pci->resources) {
+		struct resource *res = win->res;
+
+		if (!res->flags)
+			continue;
+
+		switch (resource_type(res)) {
+		case IORESOURCE_IO:
+		case IORESOURCE_MEM:
+			rcar_pcie_setup_window(i, pci, win);
+			i++;
+			break;
+		case IORESOURCE_BUS:
+			pci->root_bus_nr = res->start;
+			break;
+		default:
+			continue;
+		}
+
+		pci_add_resource(resource, res);
+	}
+
+	return 1;
+}
+
+static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	unsigned int timeout = 1000;
+	u32 macsr;
+
+	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
+		return;
+
+	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
+		dev_err(dev, "Speed change already in progress\n");
+		return;
+	}
+
+	macsr = rcar_pci_read_reg(pcie, MACSR);
+	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
+		goto done;
+
+	/* Set target link speed to 5.0 GT/s */
+	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+		   PCI_EXP_LNKSTA_CLS_5_0GB);
+
+	/* Set speed change reason as intentional factor */
+	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
+
+	/* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
+	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
+		rcar_pci_write_reg(pcie, macsr, MACSR);
+
+	/* Start link speed change */
+	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
+
+	while (timeout--) {
+		macsr = rcar_pci_read_reg(pcie, MACSR);
+		if (macsr & SPCHGFIN) {
+			/* Clear the interrupt bits */
+			rcar_pci_write_reg(pcie, macsr, MACSR);
+
+			if (macsr & SPCHGFAIL)
+				dev_err(dev, "Speed change failed\n");
+
+			goto done;
+		}
+
+		msleep(1);
+	}
+
+	dev_err(dev, "Speed change timed out\n");
+
+done:
+	dev_info(dev, "Current link speed is %s GT/s\n",
+		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
+}
+
+static int rcar_pcie_enable(struct rcar_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+	struct pci_bus *bus, *child;
+	int ret;
+
+	/* Try setting 5 GT/s link speed */
+	rcar_pcie_force_speedup(pcie);
+
+	rcar_pcie_setup(&bridge->windows, pcie);
+
+	pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_bus_nr;
+	bridge->ops = &rcar_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		bridge->msi = &pcie->msi.chip;
+
+	ret = pci_scan_root_bus_bridge(bridge);
+	if (ret < 0)
+		return ret;
+
+	bus = bridge->bus;
+
+	pci_bus_size_bridges(bus);
+	pci_bus_assign_resources(bus);
+
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(bus);
+
+	return 0;
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	unsigned int timeout = 100;
+
+	while (timeout--) {
+		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+			return 0;
+
+		udelay(100);
+	}
+
+	dev_err(dev, "Access to PCIe phy timed out\n");
+
+	return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+			  unsigned int rate, u32 addr,
+			  unsigned int lane, u32 data)
+{
+	u32 phyaddr;
+
+	phyaddr = WRITE_CMD |
+		((rate & 1) << RATE_POS) |
+		((lane & 0xf) << LANE_POS) |
+		((addr & 0xff) << ADR_POS);
+
+	/* Set write data */
+	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+	/* Ignore errors as they will be dealt with if the data link is down */
+	phy_wait_for_ack(pcie);
+
+	/* Clear command */
+	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+	/* Ignore errors as they will be dealt with if the data link is down */
+	phy_wait_for_ack(pcie);
+}
+
+static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
+{
+	unsigned int timeout = 10;
+
+	while (timeout--) {
+		if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
+			return 0;
+
+		msleep(5);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+	unsigned int timeout = 10000;
+
+	while (timeout--) {
+		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+			return 0;
+
+		udelay(5);
+		cpu_relax();
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+	int err;
+
+	/* Begin initialization */
+	rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+	/* Set mode */
+	rcar_pci_write_reg(pcie, 1, PCIEMSR);
+
+	err = rcar_pcie_wait_for_phyrdy(pcie);
+	if (err)
+		return err;
+
+	/*
+	 * Initial header for port config space is type 1, set the device
+	 * class to match. Hardware takes care of propagating the IDSETR
+	 * settings, so there is no need to bother with a quirk.
+	 */
+	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+
+	/*
+	 * Set up the Secondary Bus Number and Subordinate Bus Number, even
+	 * though they aren't used, to avoid the bridge being detected as
+	 * broken.
+	 */
+	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+	/* Initialize default capabilities. */
+	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+		PCI_HEADER_TYPE_BRIDGE);
+
+	/* Enable data link layer active state reporting */
+	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+		PCI_EXP_LNKCAP_DLLLARC);
+
+	/* Write out the physical slot number = 0 */
+	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+	/* Set the completion timer timeout to the maximum 50ms. */
+	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+	/* Terminate list of capabilities (Next Capability Offset=0) */
+	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+	/* Enable MSI */
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+
+	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
+	/* Finish initialization - establish a PCI Express link */
+	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+	/* This will timeout if we don't have a link. */
+	err = rcar_pcie_wait_for_dl(pcie);
+	if (err)
+		return err;
+
+	/* Enable INTx interrupts */
+	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+	wmb();
+
+	return 0;
+}
+
+static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
+{
+	/* Initialize the phy */
+	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
+	return 0;
+}
+
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
+{
+	/*
+	 * These settings come from the R-Car Series, 2nd Generation User's
+	 * Manual, section 50.3.1 (2) Initialization of the physical layer.
+	 */
+	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
+	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
+	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
+	/* The following value is for DC connection, no termination resistor */
+	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
+	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+	return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
+{
+	int err;
+
+	err = phy_init(pcie->phy);
+	if (err)
+		return err;
+
+	err = phy_power_on(pcie->phy);
+	if (err)
+		phy_exit(pcie->phy);
+
+	return err;
+}
+
+static int rcar_msi_alloc(struct rcar_msi *chip)
+{
+	int msi;
+
+	mutex_lock(&chip->lock);
+
+	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+	if (msi < INT_PCI_MSI_NR)
+		set_bit(msi, chip->used);
+	else
+		msi = -ENOSPC;
+
+	mutex_unlock(&chip->lock);
+
+	return msi;
+}
+
+static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
+{
+	int msi;
+
+	mutex_lock(&chip->lock);
+	msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
+				      order_base_2(no_irqs));
+	mutex_unlock(&chip->lock);
+
+	return msi;
+}
+
+static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
+{
+	mutex_lock(&chip->lock);
+	clear_bit(irq, chip->used);
+	mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+	struct rcar_pcie *pcie = data;
+	struct rcar_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
+	unsigned long reg;
+
+	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+
+	/* MSI & INTx share an interrupt - we only handle MSI here */
+	if (!reg)
+		return IRQ_NONE;
+
+	while (reg) {
+		unsigned int index = find_first_bit(&reg, 32);
+		unsigned int msi_irq;
+
+		/* clear the interrupt */
+		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+
+		msi_irq = irq_find_mapping(msi->domain, index);
+		if (msi_irq) {
+			if (test_bit(index, msi->used))
+				generic_handle_irq(msi_irq);
+			else
+				dev_info(dev, "unhandled MSI\n");
+		} else {
+			/* Unknown MSI, just clear it */
+			dev_dbg(dev, "unexpected MSI\n");
+		}
+
+		/* see if there's any more pending in this vector */
+		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+			      struct msi_desc *desc)
+{
+	struct rcar_msi *msi = to_rcar_msi(chip);
+	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+	struct msi_msg msg;
+	unsigned int irq;
+	int hwirq;
+
+	hwirq = rcar_msi_alloc(msi);
+	if (hwirq < 0)
+		return hwirq;
+
+	irq = irq_find_mapping(msi->domain, hwirq);
+	if (!irq) {
+		rcar_msi_free(msi, hwirq);
+		return -EINVAL;
+	}
+
+	irq_set_msi_desc(irq, desc);
+
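+	/*
+	 * The MSI address is the capture window programmed into
+	 * PCIEMSIALR/PCIEMSIAUR at init time; the data payload selects
+	 * the status bit the handler will find set in PCIEMSIFR.
+	 */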
+	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+	msg.data = hwirq;
+
+	pci_write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static int rcar_msi_setup_irqs(struct msi_controller *chip,
+			       struct pci_dev *pdev, int nvec, int type)
+{
+	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+	struct rcar_msi *msi = to_rcar_msi(chip);
+	struct msi_desc *desc;
+	struct msi_msg msg;
+	unsigned int irq;
+	int hwirq;
+	int i;
+
+	/* MSI-X interrupts are not supported */
+	if (type == PCI_CAP_ID_MSIX)
+		return -EINVAL;
+
+	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+	hwirq = rcar_msi_alloc_region(msi, nvec);
+	if (hwirq < 0)
+		return -ENOSPC;
+
+	irq = irq_find_mapping(msi->domain, hwirq);
+	if (!irq)
+		return -ENOSPC;
+
+	for (i = 0; i < nvec; i++) {
+		/*
+		 * irq_create_mapping() called from rcar_pcie_probe()
+		 * pre-allocates descs, so there is no need to allocate
+		 * descs here.  We can therefore assume that if
+		 * irq_find_mapping() above returns non-zero, then the
+		 * descs are also successfully allocated.
+		 */
+		if (irq_set_msi_desc_off(irq, i, desc)) {
+			/* TODO: clear */
+			return -EINVAL;
+		}
+	}
+
+	desc->nvec_used = nvec;
+	desc->msi_attrib.multiple = order_base_2(nvec);
+
+	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+	msg.data = hwirq;
+
+	pci_write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+	struct rcar_msi *msi = to_rcar_msi(chip);
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	rcar_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip rcar_msi_irq_chip = {
+	.name = "R-Car PCIe MSI",
+	.irq_enable = pci_msi_unmask_irq,
+	.irq_disable = pci_msi_mask_irq,
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
+			irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = rcar_msi_map,
+};
+
+static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
+{
+	struct rcar_msi *msi = &pcie->msi;
+	int i, irq;
+
+	for (i = 0; i < INT_PCI_MSI_NR; i++) {
+		irq = irq_find_mapping(msi->domain, i);
+		if (irq > 0)
+			irq_dispose_mapping(irq);
+	}
+
+	irq_domain_remove(msi->domain);
+}
+
+static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct rcar_msi *msi = &pcie->msi;
+	phys_addr_t base;
+	int err, i;
+
+	mutex_init(&msi->lock);
+
+	msi->chip.dev = dev;
+	msi->chip.setup_irq = rcar_msi_setup_irq;
+	msi->chip.setup_irqs = rcar_msi_setup_irqs;
+	msi->chip.teardown_irq = rcar_msi_teardown_irq;
+
+	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
+					    &msi_domain_ops, &msi->chip);
+	if (!msi->domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < INT_PCI_MSI_NR; i++)
+		irq_create_mapping(msi->domain, i);
+
+	/* Two IRQs serve MSI, but they are also shared with non-MSI interrupts */
+	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       rcar_msi_irq_chip.name, pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to request IRQ: %d\n", err);
+		goto err;
+	}
+
+	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       rcar_msi_irq_chip.name, pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to request IRQ: %d\n", err);
+		goto err;
+	}
+
+	/* setup MSI data target */
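+	/*
+	 * A spare page is allocated to reserve a physical address for the
+	 * MSI capture window; inbound writes matching the address in
+	 * PCIEMSIALR/PCIEMSIAUR raise the MSI interrupt.
+	 */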
+	msi->pages = __get_free_pages(GFP_KERNEL, 0);
+	if (!msi->pages) {
+		err = -ENOMEM;
+		goto err;
+	}
+	base = virt_to_phys((void *)msi->pages);
+
+	rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
+	rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
+
+	/* enable all MSI interrupts */
+	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+
+	return 0;
+
+err:
+	rcar_pcie_unmap_msi(pcie);
+	return err;
+}
+
+static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
+{
+	struct rcar_msi *msi = &pcie->msi;
+
+	/* Disable all MSI interrupts */
+	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+	/* Disable address decoding of the MSI interrupt, MSIFE */
+	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+	free_pages(msi->pages, 0);
+
+	rcar_pcie_unmap_msi(pcie);
+}
+
+static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct resource res;
+	int err, i;
+
+	pcie->phy = devm_phy_optional_get(dev, "pcie");
+	if (IS_ERR(pcie->phy))
+		return PTR_ERR(pcie->phy);
+
+	err = of_address_to_resource(dev->of_node, 0, &res);
+	if (err)
+		return err;
+
+	pcie->base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
+	if (IS_ERR(pcie->bus_clk)) {
+		dev_err(dev, "cannot get pcie bus clock\n");
+		return PTR_ERR(pcie->bus_clk);
+	}
+
+	i = irq_of_parse_and_map(dev->of_node, 0);
+	if (!i) {
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
+		err = -ENOENT;
+		goto err_irq1;
+	}
+	pcie->msi.irq1 = i;
+
+	i = irq_of_parse_and_map(dev->of_node, 1);
+	if (!i) {
+		dev_err(dev, "cannot get platform resources for msi interrupt\n");
+		err = -ENOENT;
+		goto err_irq2;
+	}
+	pcie->msi.irq2 = i;
+
+	return 0;
+
+err_irq2:
+	irq_dispose_mapping(pcie->msi.irq1);
+err_irq1:
+	return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+				    struct of_pci_range *range,
+				    int *index)
+{
+	u64 restype = range->flags;
+	u64 cpu_addr = range->cpu_addr;
+	u64 cpu_end = range->cpu_addr + range->size;
+	u64 pci_addr = range->pci_addr;
+	u32 flags = LAM_64BIT | LAR_ENABLE;
+	u64 mask;
+	u64 size;
+	int idx = *index;
+
+	if (restype & IORESOURCE_PREFETCH)
+		flags |= LAM_PREFETCH;
+
+	/*
+	 * If the size of the range is larger than the alignment of the start
+	 * address, we have to use multiple entries to perform the mapping.
+	 */
+	if (cpu_addr > 0) {
+		unsigned long nr_zeros = __ffs64(cpu_addr);
+		u64 alignment = 1ULL << nr_zeros;
+
+		size = min(range->size, alignment);
+	} else {
+		size = range->size;
+	}
+	/* Hardware supports max 4GiB inbound region */
+	size = min(size, 1ULL << 32);
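+	/*
+	 * e.g. a 256 MiB range at CPU address 0x4800_0000 (alignment
+	 * 128 MiB) is split into two 128 MiB windows at 0x4800_0000 and
+	 * 0x5000_0000, each taking two PCIEPRAR/PCIELAR/PCIELAMR slots
+	 * for the 64-bit address halves.
+	 */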
+
+	mask = roundup_pow_of_two(size) - 1;
+	mask &= ~0xf;
+
+	while (cpu_addr < cpu_end) {
+		/*
+		 * Set up 64-bit inbound regions as the range parser doesn't
+		 * distinguish between 32 and 64-bit types.
+		 */
+		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
+				   PCIEPRAR(idx));
+		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
+				   PCIELAMR(idx));
+
+		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
+				   PCIEPRAR(idx + 1));
+		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
+				   PCIELAR(idx + 1));
+		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
+
+		pci_addr += size;
+		cpu_addr += size;
+		idx += 2;
+
+		if (idx > MAX_NR_INBOUND_MAPS) {
+			dev_err(pcie->dev, "Failed to map inbound regions!\n");
+			return -EINVAL;
+		}
+	}
+	*index = idx;
+
+	return 0;
+}
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
+					  struct device_node *np)
+{
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	int index = 0;
+	int err;
+
+	if (of_pci_dma_range_parser_init(&parser, np))
+		return -EINVAL;
+
+	/* Get the dma-ranges from DT */
+	for_each_of_pci_range(&parser, &range) {
+		u64 end = range.cpu_addr + range.size - 1;
+
+		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+			range.flags, range.cpu_addr, end, range.pci_addr);
+
+		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+	{ .compatible = "renesas,pcie-r8a7779",
+	  .data = rcar_pcie_phy_init_h1 },
+	{ .compatible = "renesas,pcie-r8a7790",
+	  .data = rcar_pcie_phy_init_gen2 },
+	{ .compatible = "renesas,pcie-r8a7791",
+	  .data = rcar_pcie_phy_init_gen2 },
+	{ .compatible = "renesas,pcie-rcar-gen2",
+	  .data = rcar_pcie_phy_init_gen2 },
+	{ .compatible = "renesas,pcie-r8a7795",
+	  .data = rcar_pcie_phy_init_gen3 },
+	{ .compatible = "renesas,pcie-rcar-gen3",
+	  .data = rcar_pcie_phy_init_gen3 },
+	{},
+};
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rcar_pcie *pcie;
+	u32 data;
+	int err;
+	int (*phy_init_fn)(struct rcar_pcie *);
+	struct pci_host_bridge *bridge;
+
+	bridge = pci_alloc_host_bridge(sizeof(*pcie));
+	if (!bridge)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(bridge);
+
+	pcie->dev = dev;
+	platform_set_drvdata(pdev, pcie);
+
+	err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
+	if (err)
+		goto err_free_bridge;
+
+	pm_runtime_enable(pcie->dev);
+	err = pm_runtime_get_sync(pcie->dev);
+	if (err < 0) {
+		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+		goto err_pm_disable;
+	}
+
+	err = rcar_pcie_get_resources(pcie);
+	if (err < 0) {
+		dev_err(dev, "failed to request resources: %d\n", err);
+		goto err_pm_put;
+	}
+
+	err = clk_prepare_enable(pcie->bus_clk);
+	if (err) {
+		dev_err(dev, "failed to enable bus clock: %d\n", err);
+		goto err_unmap_msi_irqs;
+	}
+
+	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
+	if (err)
+		goto err_clk_disable;
+
+	phy_init_fn = of_device_get_match_data(dev);
+	err = phy_init_fn(pcie);
+	if (err) {
+		dev_err(dev, "failed to init PCIe PHY\n");
+		goto err_clk_disable;
+	}
+
+	/* Failure to get a link might just be that no cards are inserted */
+	if (rcar_pcie_hw_init(pcie)) {
+		dev_info(dev, "PCIe link down\n");
+		err = -ENODEV;
+		goto err_phy_shutdown;
+	}
+
+	data = rcar_pci_read_reg(pcie, MACSR);
+	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		err = rcar_pcie_enable_msi(pcie);
+		if (err < 0) {
+			dev_err(dev,
+				"failed to enable MSI support: %d\n",
+				err);
+			goto err_phy_shutdown;
+		}
+	}
+
+	err = rcar_pcie_enable(pcie);
+	if (err)
+		goto err_msi_teardown;
+
+	return 0;
+
+err_msi_teardown:
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		rcar_pcie_teardown_msi(pcie);
+
+err_phy_shutdown:
+	if (pcie->phy) {
+		phy_power_off(pcie->phy);
+		phy_exit(pcie->phy);
+	}
+
+err_clk_disable:
+	clk_disable_unprepare(pcie->bus_clk);
+
+err_unmap_msi_irqs:
+	irq_dispose_mapping(pcie->msi.irq2);
+	irq_dispose_mapping(pcie->msi.irq1);
+
+err_pm_put:
+	pm_runtime_put(dev);
+
+err_pm_disable:
+	pm_runtime_disable(dev);
+	pci_free_resource_list(&pcie->resources);
+
+err_free_bridge:
+	pci_free_host_bridge(bridge);
+
+	return err;
+}
+
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+	struct rcar_pcie *pcie = dev_get_drvdata(dev);
+
+	if (rcar_pci_read_reg(pcie, PMSR) &&
+	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+		return 0;
+
+	/* Re-establish the PCIe link */
+	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+	return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+	.resume_noirq = rcar_pcie_resume_noirq,
+};
+
+static struct platform_driver rcar_pcie_driver = {
+	.driver = {
+		.name = "rcar-pcie",
+		.of_match_table = rcar_pcie_of_match,
+		.pm = &rcar_pcie_pm_ops,
+		.suppress_bind_attrs = true,
+	},
+	.probe = rcar_pcie_probe,
+};
+builtin_platform_driver(rcar_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/pcie-rockchip-ep.c b/marvell/linux/drivers/pci/controller/pcie-rockchip-ep.c
new file mode 100644
index 0000000..8fc8848
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-rockchip-ep.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe endpoint controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *         Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/configfs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/pci-epc.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epf.h>
+#include <linux/sizes.h>
+
+#include "pcie-rockchip.h"
+
+/**
+ * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
+ * @rockchip: Rockchip PCIe controller
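+ * @epc: PCI endpoint controller (EPC) device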
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *		   dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ *		  the sending of a memory write (MSI) / normal message (legacy
+ *		  IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ *		  dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ *		the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct rockchip_pcie_ep {
+	struct rockchip_pcie	rockchip;
+	struct pci_epc		*epc;
+	u32			max_regions;
+	unsigned long		ob_region_map;
+	phys_addr_t		*ob_addr;
+	phys_addr_t		irq_phys_addr;
+	void __iomem		*irq_cpu_addr;
+	u64			irq_pci_addr;
+	u8			irq_pci_fn;
+	u8			irq_pending;
+};
+
+static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
+					  u32 region)
+{
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region));
+	rockchip_pcie_write(rockchip, 0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region));
+}
+
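+/*
+ * Number of low address bits passed through the outbound ATU
+ * untranslated: fls64() of the XOR of the first and last PCI address
+ * yields the highest differing bit, clamped to the limits the
+ * translation unit supports.
+ */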
+static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
+					    u64 pci_addr, size_t size)
+{
+	int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
+
+	return clamp(num_pass_bits,
+		     ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
+		     ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
+}
+
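+/*
+ * Program outbound ATU region @r for function @fn.  Normal-message
+ * regions (used for legacy IRQ TLPs) carry no PCI-side address, so only
+ * the descriptor words are programmed; memory regions also get the PCI
+ * address pair.
+ */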
+static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
+					 u32 r, u32 type, u64 cpu_addr,
+					 u64 pci_addr, size_t size)
+{
+	int num_pass_bits;
+	u32 addr0, addr1, desc0, desc1;
+	bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG);
+
+	num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
+							 pci_addr, size);
+
+	cpu_addr -= rockchip->mem_res->start;
+	addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) &
+		PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+		(lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+	addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr);
+	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type;
+	desc1 = 0;
+
+	if (is_nor_msg) {
+		rockchip_pcie_write(rockchip, 0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+		rockchip_pcie_write(rockchip, 0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+		rockchip_pcie_write(rockchip, desc0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+		rockchip_pcie_write(rockchip, desc1,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+	} else {
+		/* PCI bus address region */
+		rockchip_pcie_write(rockchip, addr0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
+		rockchip_pcie_write(rockchip, addr1,
+				    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
+		rockchip_pcie_write(rockchip, desc0,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
+		rockchip_pcie_write(rockchip, desc1,
+				    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
+
+		addr0 =
+		    ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
+		    (lower_32_bits(cpu_addr) &
+		     PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
+		addr1 = upper_32_bits(cpu_addr);
+	}
+
+	/* CPU bus address region */
+	rockchip_pcie_write(rockchip, addr0,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r));
+	rockchip_pcie_write(rockchip, addr1,
+			    ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r));
+}
+
+static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+					 struct pci_epf_header *hdr)
+{
+	u32 reg;
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+
+	/* All functions share function 0's vendor ID and subsystem vendor ID */
+	if (fn == 0) {
+		rockchip_pcie_write(rockchip,
+				    hdr->vendorid | hdr->subsys_vendor_id << 16,
+				    PCIE_CORE_CONFIG_VENDOR);
+	}
+
+	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
+	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
+	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);
+
+	rockchip_pcie_write(rockchip,
+			    hdr->revid |
+			    hdr->progif_code << 8 |
+			    hdr->subclass_code << 16 |
+			    hdr->baseclass_code << 24,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
+	rockchip_pcie_write(rockchip, hdr->cache_line_size,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    PCI_CACHE_LINE_SIZE);
+	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    PCI_SUBSYSTEM_VENDOR_ID);
+	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    PCI_INTERRUPT_LINE);
+
+	return 0;
+}
+
+static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+				    struct pci_epf_bar *epf_bar)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	dma_addr_t bar_phys = epf_bar->phys_addr;
+	enum pci_barno bar = epf_bar->barno;
+	int flags = epf_bar->flags;
+	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+	u64 sz;
+
+	/* BAR size is 2^(aperture + 7) */
+	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);
+
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not suited
+	 * for 64-bit values.
+	 */
+	sz = 1ULL << fls64(sz - 1);
+	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
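+	/* e.g. a 1 MiB BAR: aperture = ilog2(SZ_1M) - 7 = 13 */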
+
+	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
+	} else {
+		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+		bool is_64bits = sz > SZ_2G;
+
+		if (is_64bits && (bar & 1))
+			return -EINVAL;
+
+		if (is_64bits && is_prefetch)
+			ctrl =
+			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+		else if (is_prefetch)
+			ctrl =
+			    ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+		else if (is_64bits)
+			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
+		else
+			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
+	}
+
+	if (bar < BAR_4) {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	addr0 = lower_32_bits(bar_phys);
+	addr1 = upper_32_bits(bar_phys);
+
+	cfg = rockchip_pcie_read(rockchip, reg);
+	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+
+	rockchip_pcie_write(rockchip, cfg, reg);
+	rockchip_pcie_write(rockchip, addr0,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+	rockchip_pcie_write(rockchip, addr1,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+
+	return 0;
+}
+
+static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+				       struct pci_epf_bar *epf_bar)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 reg, cfg, b, ctrl;
+	enum pci_barno bar = epf_bar->barno;
+
+	if (bar < BAR_4) {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
+		b = bar;
+	} else {
+		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
+		b = bar - BAR_4;
+	}
+
+	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
+	cfg = rockchip_pcie_read(rockchip, reg);
+	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+
+	rockchip_pcie_write(rockchip, cfg, reg);
+	rockchip_pcie_write(rockchip, 0x0,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
+	rockchip_pcie_write(rockchip, 0x0,
+			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
+}
+
+static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+				     phys_addr_t addr, u64 pci_addr,
+				     size_t size)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *pcie = &ep->rockchip;
+	u32 r;
+
+	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
+	/*
+	 * Region 0 is reserved for configuration space and shouldn't
+	 * be used elsewhere per TRM, so leave it out.
+	 */
+	if (r >= ep->max_regions - 1) {
+		dev_err(&epc->dev, "no free outbound region\n");
+		return -EINVAL;
+	}
+
+	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr,
+				     pci_addr, size);
+
+	set_bit(r, &ep->ob_region_map);
+	ep->ob_addr[r] = addr;
+
+	return 0;
+}
+
+static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+					phys_addr_t addr)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 r;
+
+	for (r = 0; r < ep->max_regions - 1; r++)
+		if (ep->ob_addr[r] == addr)
+			break;
+
+	/*
+	 * Region 0 is reserved for configuration space and shouldn't
+	 * be used elsewhere per TRM, so leave it out.
+	 */
+	if (r == ep->max_regions - 1)
+		return;
+
+	rockchip_pcie_clear_ep_ob_atu(rockchip, r);
+
+	ep->ob_addr[r] = 0;
+	clear_bit(r, &ep->ob_region_map);
+}
+
+static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn,
+				    u8 multi_msg_cap)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 flags;
+
+	flags = rockchip_pcie_read(rockchip,
+				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
+	flags |=
+	   (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+	   (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
+	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
+	rockchip_pcie_write(rockchip, flags,
+			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	return 0;
+}
+
+static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 flags;
+
+	flags = rockchip_pcie_read(rockchip,
+				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+		return -EINVAL;
+
+	return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+}
+
+static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+					 u8 intx, bool do_assert)
+{
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+
+	intx &= 3;
+
+	if (do_assert) {
+		ep->irq_pending |= BIT(intx);
+		rockchip_pcie_write(rockchip,
+				    PCIE_CLIENT_INT_IN_ASSERT |
+				    PCIE_CLIENT_INT_PEND_ST_PEND,
+				    PCIE_CLIENT_LEGACY_INT_CTRL);
+	} else {
+		ep->irq_pending &= ~BIT(intx);
+		rockchip_pcie_write(rockchip,
+				    PCIE_CLIENT_INT_IN_DEASSERT |
+				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
+				    PCIE_CLIENT_LEGACY_INT_CTRL);
+	}
+}
+
+static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
+					    u8 intx)
+{
+	u16 cmd;
+
+	cmd = rockchip_pcie_read(&ep->rockchip,
+				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				 ROCKCHIP_PCIE_EP_CMD_STATUS);
+
+	if (cmd & PCI_COMMAND_INTX_DISABLE)
+		return -EINVAL;
+
+	/*
+	 * The TRM vaguely states that some delay is needed between
+	 * asserting and deasserting INTx, on the order of a few AHB bus
+	 * clock cycles, so insert a generous 1ms here.
+	 */
+	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
+	mdelay(1);
+	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);
+	return 0;
+}
+
+static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
+					 u8 interrupt_num)
+{
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	u32 flags, mme, data, data_mask;
+	u8 msi_count;
+	u64 pci_addr, pci_addr_mask = 0xff;
+
+	/* Check MSI enable bit */
+	flags = rockchip_pcie_read(&ep->rockchip,
+				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
+		return -EINVAL;
+
+	/* Get MSI numbers from MME */
+	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+	msi_count = 1 << mme;
+	if (!interrupt_num || interrupt_num > msi_count)
+		return -EINVAL;
+
+	/* Set MSI private data */
+	data_mask = msi_count - 1;
+	data = rockchip_pcie_read(rockchip,
+				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+				  PCI_MSI_DATA_64);
+	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);
+
+	/* Get MSI PCI address */
+	pci_addr = rockchip_pcie_read(rockchip,
+				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+				      PCI_MSI_ADDRESS_HI);
+	pci_addr <<= 32;
+	pci_addr |= rockchip_pcie_read(rockchip,
+				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
+				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
+				       PCI_MSI_ADDRESS_LO);
+	pci_addr &= GENMASK_ULL(63, 2);
+
+	/* Set the outbound region if needed. */
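+	/*
+	 * The last outbound region is dedicated to MSI writes and is only
+	 * reprogrammed when the target address or function changes, so
+	 * repeated MSIs to the same doorbell skip the ATU setup.
+	 */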
+	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
+		     ep->irq_pci_fn != fn)) {
+		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1,
+					     AXI_WRAPPER_MEM_WRITE,
+					     ep->irq_phys_addr,
+					     pci_addr & ~pci_addr_mask,
+					     pci_addr_mask + 1);
+		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
+		ep->irq_pci_fn = fn;
+	}
+
+	writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+	return 0;
+}
+
+static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+				      enum pci_epc_irq_type type,
+				      u16 interrupt_num)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+
+	switch (type) {
+	case PCI_EPC_IRQ_LEGACY:
+		return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
+	case PCI_EPC_IRQ_MSI:
+		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int rockchip_pcie_ep_start(struct pci_epc *epc)
+{
+	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+	struct rockchip_pcie *rockchip = &ep->rockchip;
+	struct pci_epf *epf;
+	u32 cfg;
+
+	cfg = BIT(0);
+	list_for_each_entry(epf, &epc->pci_epf, list)
+		cfg |= BIT(epf->func_no);
+
+	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+
+	return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features = {
+	.linkup_notifier = false,
+	.msi_capable = true,
+	.msix_capable = false,
+	.align = 256,
+};
+
+static const struct pci_epc_features*
+rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+	return &rockchip_pcie_epc_features;
+}
+
+static const struct pci_epc_ops rockchip_pcie_epc_ops = {
+	.write_header	= rockchip_pcie_ep_write_header,
+	.set_bar	= rockchip_pcie_ep_set_bar,
+	.clear_bar	= rockchip_pcie_ep_clear_bar,
+	.map_addr	= rockchip_pcie_ep_map_addr,
+	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
+	.set_msi	= rockchip_pcie_ep_set_msi,
+	.get_msi	= rockchip_pcie_ep_get_msi,
+	.raise_irq	= rockchip_pcie_ep_raise_irq,
+	.start		= rockchip_pcie_ep_start,
+	.get_features	= rockchip_pcie_ep_get_features,
+};
+
+static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
+				     struct rockchip_pcie_ep *ep)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	err = rockchip_pcie_parse_dt(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_get_phys(rockchip);
+	if (err)
+		return err;
+
+	err = of_property_read_u32(dev->of_node,
+				   "rockchip,max-outbound-regions",
+				   &ep->max_regions);
+	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
+		ep->max_regions = MAX_REGION_LIMIT;
+
+	err = of_property_read_u8(dev->of_node, "max-functions",
+				  &ep->epc->max_functions);
+	if (err < 0)
+		ep->epc->max_functions = 1;
+
+	return 0;
+}
+
+static const struct of_device_id rockchip_pcie_ep_of_match[] = {
+	{ .compatible = "rockchip,rk3399-pcie-ep"},
+	{},
+};
+
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rockchip_pcie_ep *ep;
+	struct rockchip_pcie *rockchip;
+	struct pci_epc *epc;
+	size_t max_regions;
+	int err;
+
+	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	rockchip = &ep->rockchip;
+	rockchip->is_rc = false;
+	rockchip->dev = dev;
+
+	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+	if (IS_ERR(epc)) {
+		dev_err(dev, "failed to create epc device\n");
+		return PTR_ERR(epc);
+	}
+
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+
+	err = rockchip_pcie_parse_ep_dt(rockchip, ep);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_enable_clocks(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_init_port(rockchip);
+	if (err)
+		goto err_disable_clocks;
+
+	/* Establish the link automatically */
+	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+			    PCIE_CLIENT_CONFIG);
+
+	max_regions = ep->max_regions;
+	ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
+				   GFP_KERNEL);
+
+	if (!ep->ob_addr) {
+		err = -ENOMEM;
+		goto err_uninit_port;
+	}
+
+	/* Only enable function 0 by default */
+	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+	err = pci_epc_mem_init(epc, rockchip->mem_res->start,
+			       resource_size(rockchip->mem_res));
+	if (err < 0) {
+		dev_err(dev, "failed to initialize the memory space\n");
+		goto err_uninit_port;
+	}
+
+	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+						  SZ_128K);
+	if (!ep->irq_cpu_addr) {
+		dev_err(dev, "failed to reserve memory space for MSI\n");
+		err = -ENOMEM;
+		goto err_epc_mem_exit;
+	}
+
+	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+
+	rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
+			    PCIE_CLIENT_CONFIG);
+
+	return 0;
+err_epc_mem_exit:
+	pci_epc_mem_exit(epc);
+err_uninit_port:
+	rockchip_pcie_deinit_phys(rockchip);
+err_disable_clocks:
+	rockchip_pcie_disable_clocks(rockchip);
+	return err;
+}
+
+static struct platform_driver rockchip_pcie_ep_driver = {
+	.driver = {
+		.name = "rockchip-pcie-ep",
+		.of_match_table = rockchip_pcie_ep_of_match,
+	},
+	.probe = rockchip_pcie_ep_probe,
+};
+
+builtin_platform_driver(rockchip_pcie_ep_driver);
diff --git a/marvell/linux/drivers/pci/controller/pcie-rockchip-host.c b/marvell/linux/drivers/pci/controller/pcie-rockchip-host.c
new file mode 100644
index 0000000..ef8e677
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-rockchip-host.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *         Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+	u32 status;
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+	u32 status;
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+	u32 val;
+
+	/* Update Tx credit maximum update interval */
+	val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+	val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+	val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000);	/* ns */
+	rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+				      struct pci_bus *bus, int dev)
+{
+	/* access only one slot on each root port */
+	if (bus->number == rockchip->root_bus_nr && dev > 0)
+		return 0;
+
+	/*
+	 * Do not read more than one device on the bus directly attached
+	 * to the RC's downstream side.
+	 */
+	if (bus->primary == rockchip->root_bus_nr && dev > 0)
+		return 0;
+
+	return 1;
+}
+
+static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
+{
+	u32 val;
+	u8 map;
+
+	if (rockchip->legacy_phy)
+		return GENMASK(MAX_LANE_NUM - 1, 0);
+
+	val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
+	map = val & PCIE_CORE_LANE_MAP_MASK;
+
+	/* The link may be using a reverse-indexed mapping. */
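+	/* bitrev8() reverses all eight bits; shift down to keep the 4-bit map */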
+	if (val & PCIE_CORE_LANE_MAP_REVERSE)
+		map = bitrev8(map) >> 4;
+
+	return map;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+				     int where, int size, u32 *val)
+{
+	void __iomem *addr;
+
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
+
+	if (!IS_ALIGNED((uintptr_t)addr, size)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	if (size == 4) {
+		*val = readl(addr);
+	} else if (size == 2) {
+		*val = readw(addr);
+	} else if (size == 1) {
+		*val = readb(addr);
+	} else {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+				     int where, int size, u32 val)
+{
+	u32 mask, tmp, offset;
+	void __iomem *addr;
+
+	offset = where & ~0x3;
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
+
+	if (size == 4) {
+		writel(val, addr);
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
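+	/* e.g. a byte write with (where & 0x3) == 1 clears only bits [15:8] */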
+
+	/*
+	 * N.B. This read/modify/write isn't safe in general because it can
+	 * corrupt RW1C bits in adjacent registers.  But the hardware
+	 * doesn't support smaller writes.
+	 */
+	tmp = readl(addr) & mask;
+	tmp |= val << ((where & 0x3) * 8);
+	writel(tmp, addr);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+				       struct pci_bus *bus, u32 devfn,
+				       int where, int size, u32 *val)
+{
+	u32 busdev;
+
+	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+				PCI_FUNC(devfn), where);
+
+	if (!IS_ALIGNED(busdev, size)) {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
+	if (size == 4) {
+		*val = readl(rockchip->reg_base + busdev);
+	} else if (size == 2) {
+		*val = readw(rockchip->reg_base + busdev);
+	} else if (size == 1) {
+		*val = readb(rockchip->reg_base + busdev);
+	} else {
+		*val = 0;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+				       struct pci_bus *bus, u32 devfn,
+				       int where, int size, u32 val)
+{
+	u32 busdev;
+
+	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+				PCI_FUNC(devfn), where);
+	if (!IS_ALIGNED(busdev, size))
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	if (bus->parent->number == rockchip->root_bus_nr)
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE0_CFG);
+	else
+		rockchip_pcie_cfg_configuration_accesses(rockchip,
+						AXI_WRAPPER_TYPE1_CFG);
+
+	if (size == 4)
+		writel(val, rockchip->reg_base + busdev);
+	else if (size == 2)
+		writew(val, rockchip->reg_base + busdev);
+	else if (size == 1)
+		writeb(val, rockchip->reg_base + busdev);
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+				 int size, u32 *val)
+{
+	struct rockchip_pcie *rockchip = bus->sysdata;
+
+	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	if (bus->number == rockchip->root_bus_nr)
+		return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+	return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
+					   val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+				 int where, int size, u32 val)
+{
+	struct rockchip_pcie *rockchip = bus->sysdata;
+
+	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (bus->number == rockchip->root_bus_nr)
+		return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+	return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
+					   val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+	.read = rockchip_pcie_rd_conf,
+	.write = rockchip_pcie_wr_conf,
+};
+
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+	int curr;
+	u32 status, scale, power;
+
+	if (IS_ERR(rockchip->vpcie3v3))
+		return;
+
+	/*
+	 * Set the RC's captured slot power limit and scale if vpcie3v3
+	 * is available. Both default to zero, which means software must
+	 * program them according to the actual power supply.
+	 */
+	curr = regulator_get_current_limit(rockchip->vpcie3v3);
+	if (curr <= 0)
+		return;
+
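+	/*
+	 * The captured slot power limit is an 8-bit value (CSPL) plus a
+	 * scale (CPLS: 0 = 1x, 1 = 0.1x, 2 = 0.01x, 3 = 0.001x); start at
+	 * the finest scale and coarsen until the value fits.
+	 */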
+	scale = 3; /* 0.001x */
+	curr = curr / 1000; /* convert to mA */
+	power = (curr * 3300) / 1000; /* milliwatt */
+	while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+		if (!scale) {
+			dev_warn(rockchip->dev, "invalid power supply\n");
+			return;
+		}
+		scale--;
+		power = power / 10;
+	}
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+	status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+		  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+}
+
+/**
+ * rockchip_pcie_host_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ */
+static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err, i = MAX_LANE_NUM;
+	u32 status;
+
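+	/* Hold the endpoint in reset (PERST# low) while the port comes up */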
+	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+
+	err = rockchip_pcie_init_port(rockchip);
+	if (err)
+		return err;
+
+	/* Set the transmitted FTS count required to exit from L0s. */
+	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+	rockchip_pcie_set_power_limit(rockchip);
+
+	/* Set RC's clock architecture as common clock */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= PCI_EXP_LNKSTA_SLC << 16;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+	/* Set RC's Read Completion Boundary (RCB) to 128 bytes */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= PCI_EXP_LNKCTL_RCB;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+	/* Enable Gen1 training */
+	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+			    PCIE_CLIENT_CONFIG);
+
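+	/* Release PERST# so the endpoint can begin link training */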
+	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+
+	/* 500ms timeout value should be enough for Gen1/2 training */
+	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+				 status, PCIE_LINK_UP(status), 20,
+				 500 * USEC_PER_MSEC);
+	if (err) {
+		dev_err(dev, "PCIe link training gen1 timeout!\n");
+		goto err_power_off_phy;
+	}
+
+	if (rockchip->link_gen == 2) {
+		/*
+		 * Enable retrain for gen2. This should be configured only after
+		 * gen1 finished.
+		 */
+		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+		status |= PCI_EXP_LNKCTL_RL;
+		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+					 status, PCIE_LINK_IS_GEN2(status), 20,
+					 500 * USEC_PER_MSEC);
+		if (err)
+			dev_dbg(dev, "PCIe link training gen2 timeout, falling back to gen1!\n");
+	}
+
+	/* Check the final link width via the negotiated lane counter in MGMT */
+	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+			  PCIE_CORE_PL_CONF_LANE_SHIFT);
+	dev_dbg(dev, "current link width is x%d\n", status);
+
+	/* Power off unused lane(s) */
+	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		if (!(rockchip->lanes_map & BIT(i))) {
+			dev_dbg(dev, "idling lane %d\n", i);
+			phy_power_off(rockchip->phys[i]);
+		}
+	}
+
+	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+			    PCIE_CORE_CONFIG_VENDOR);
+	rockchip_pcie_write(rockchip,
+			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+			    PCIE_RC_CONFIG_RID_CCR);
+
+	/* Clear THP cap's next cap pointer to remove L1 substate cap */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
+	/* Clear L0s from RC's link cap */
+	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
+		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
+		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
+		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+	}
+
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+
+	return 0;
+err_power_off_phy:
+	while (i--)
+		phy_power_off(rockchip->phys[i]);
+	i = MAX_LANE_NUM;
+	while (i--)
+		phy_exit(rockchip->phys[i]);
+	return err;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+	struct rockchip_pcie *rockchip = arg;
+	struct device *dev = rockchip->dev;
+	u32 reg;
+	u32 sub_reg;
+
+	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+	if (reg & PCIE_CLIENT_INT_LOCAL) {
+		dev_dbg(dev, "local interrupt received\n");
+		sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+		if (sub_reg & PCIE_CORE_INT_PRFPE)
+			dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+		if (sub_reg & PCIE_CORE_INT_CRFPE)
+			dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+		if (sub_reg & PCIE_CORE_INT_RRPE)
+			dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+		if (sub_reg & PCIE_CORE_INT_PRFO)
+			dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+		if (sub_reg & PCIE_CORE_INT_CRFO)
+			dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+		if (sub_reg & PCIE_CORE_INT_RT)
+			dev_dbg(dev, "replay timer timed out\n");
+
+		if (sub_reg & PCIE_CORE_INT_RTR)
+			dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+		if (sub_reg & PCIE_CORE_INT_PE)
+			dev_dbg(dev, "phy error detected on receive side\n");
+
+		if (sub_reg & PCIE_CORE_INT_MTR)
+			dev_dbg(dev, "malformed TLP received from the link\n");
+
+		if (sub_reg & PCIE_CORE_INT_UCR)
+			dev_dbg(dev, "unexpected completion received from the link\n");
+
+		if (sub_reg & PCIE_CORE_INT_FCE)
+			dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+		if (sub_reg & PCIE_CORE_INT_CT)
+			dev_dbg(dev, "a request timed out waiting for completion\n");
+
+		if (sub_reg & PCIE_CORE_INT_UTC)
+			dev_dbg(dev, "unmapped TC error\n");
+
+		if (sub_reg & PCIE_CORE_INT_MMVC)
+			dev_dbg(dev, "MSI mask register changes\n");
+
+		rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+	} else if (reg & PCIE_CLIENT_INT_PHY) {
+		dev_dbg(dev, "phy link changes\n");
+		rockchip_pcie_update_txcredit_mui(rockchip);
+		rockchip_pcie_clr_bw_int(rockchip);
+	}
+
+	rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+			    PCIE_CLIENT_INT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+	struct rockchip_pcie *rockchip = arg;
+	struct device *dev = rockchip->dev;
+	u32 reg;
+
+	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+	if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+		dev_dbg(dev, "legacy done interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_MSG)
+		dev_dbg(dev, "message done interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_HOT_RST)
+		dev_dbg(dev, "hot reset interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_DPA)
+		dev_dbg(dev, "dpa interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+		dev_dbg(dev, "fatal error interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+		dev_dbg(dev, "non-fatal error interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_CORR_ERR)
+		dev_dbg(dev, "correctable error interrupt received\n");
+
+	if (reg & PCIE_CLIENT_INT_PHY)
+		dev_dbg(dev, "phy interrupt received\n");
+
+	rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+			      PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+			      PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+			      PCIE_CLIENT_INT_NFATAL_ERR |
+			      PCIE_CLIENT_INT_CORR_ERR |
+			      PCIE_CLIENT_INT_PHY),
+		   PCIE_CLIENT_INT_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+	struct device *dev = rockchip->dev;
+	u32 reg;
+	u32 hwirq;
+	u32 virq;
+
+	chained_irq_enter(chip, desc);
+
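+	/* INTA..INTD occupy bits 5..8 of PCIE_CLIENT_INT_STATUS */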
+	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+	reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+	while (reg) {
+		hwirq = ffs(reg) - 1;
+		reg &= ~BIT(hwirq);
+
+		virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+		if (virq)
+			generic_handle_irq(virq);
+		else
+			dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
+{
+	int irq, err;
+	struct device *dev = rockchip->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	irq = platform_get_irq_byname(pdev, "sys");
+	if (irq < 0) {
+		dev_err(dev, "missing sys IRQ resource\n");
+		return irq;
+	}
+
+	err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+			       IRQF_SHARED, "pcie-sys", rockchip);
+	if (err) {
+		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+		return err;
+	}
+
+	irq = platform_get_irq_byname(pdev, "legacy");
+	if (irq < 0) {
+		dev_err(dev, "missing legacy IRQ resource\n");
+		return irq;
+	}
+
+	irq_set_chained_handler_and_data(irq,
+					 rockchip_pcie_legacy_int_handler,
+					 rockchip);
+
+	irq = platform_get_irq_byname(pdev, "client");
+	if (irq < 0) {
+		dev_err(dev, "missing client IRQ resource\n");
+		return irq;
+	}
+
+	err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+			       IRQF_SHARED, "pcie-client", rockchip);
+	if (err) {
+		dev_err(dev, "failed to request PCIe client IRQ\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * rockchip_pcie_parse_host_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	err = rockchip_pcie_parse_dt(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_setup_irq(rockchip);
+	if (err)
+		return err;
+
+	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+	if (IS_ERR(rockchip->vpcie12v)) {
+		if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie12v);
+		dev_info(dev, "no vpcie12v regulator found\n");
+	}
+
+	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+	if (IS_ERR(rockchip->vpcie3v3)) {
+		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie3v3);
+		dev_info(dev, "no vpcie3v3 regulator found\n");
+	}
+
+	rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+	if (IS_ERR(rockchip->vpcie1v8)) {
+		if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie1v8);
+		dev_info(dev, "no vpcie1v8 regulator found\n");
+	}
+
+	rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+	if (IS_ERR(rockchip->vpcie0v9)) {
+		if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie0v9);
+		dev_info(dev, "no vpcie0v9 regulator found\n");
+	}
+
+	return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	if (!IS_ERR(rockchip->vpcie12v)) {
+		err = regulator_enable(rockchip->vpcie12v);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie12v regulator\n");
+			goto err_out;
+		}
+	}
+
+	if (!IS_ERR(rockchip->vpcie3v3)) {
+		err = regulator_enable(rockchip->vpcie3v3);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie3v3 regulator\n");
+			goto err_disable_12v;
+		}
+	}
+
+	if (!IS_ERR(rockchip->vpcie1v8)) {
+		err = regulator_enable(rockchip->vpcie1v8);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie1v8 regulator\n");
+			goto err_disable_3v3;
+		}
+	}
+
+	if (!IS_ERR(rockchip->vpcie0v9)) {
+		err = regulator_enable(rockchip->vpcie0v9);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+			goto err_disable_1v8;
+		}
+	}
+
+	return 0;
+
+err_disable_1v8:
+	if (!IS_ERR(rockchip->vpcie1v8))
+		regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+	if (!IS_ERR(rockchip->vpcie3v3))
+		regulator_disable(rockchip->vpcie3v3);
+err_disable_12v:
+	if (!IS_ERR(rockchip->vpcie12v))
+		regulator_disable(rockchip->vpcie12v);
+err_out:
+	return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
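+	/*
+	 * PCIE_CLIENT_INT_MASK uses the same high-halfword write-mask
+	 * scheme as PCIE_CLIENT_CONFIG: select the CLI bits in the upper
+	 * 16 bits and write zeroes below them to unmask.
+	 */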
+	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+			    (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
+	rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+			    PCIE_CORE_INT_MASK);
+
+	rockchip_pcie_enable_bw_int(rockchip);
+}
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				  irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+	if (!intc) {
+		dev_err(dev, "missing child interrupt-controller node\n");
+		return -EINVAL;
+	}
+
+	rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+						    &intx_domain_ops, rockchip);
+	of_node_put(intc);
+	if (!rockchip->irq_domain) {
+		dev_err(dev, "failed to get an INTx IRQ domain\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+				     int region_no, int type, u8 num_pass_bits,
+				     u32 lower_addr, u32 upper_addr)
+{
+	u32 ob_addr_0;
+	u32 ob_addr_1;
+	u32 ob_desc_0;
+	u32 aw_offset;
+
+	if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+		return -EINVAL;
+	if (num_pass_bits + 1 < 8)
+		return -EINVAL;
+	if (num_pass_bits > 63)
+		return -EINVAL;
+	if (region_no == 0) {
+		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+			return -EINVAL;
+	}
+	if (region_no != 0) {
+		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+			return -EINVAL;
+	}
+
+	aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+	ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+	ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+	ob_addr_1 = upper_addr;
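+	/*
+	 * Bit 23 appears to be the hardcoded-RID flag, matching
+	 * ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID in the ATS
+	 * DESC0 layout.
+	 */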
+	ob_desc_0 = (1 << 23 | type);
+
+	rockchip_pcie_write(rockchip, ob_addr_0,
+			    PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+	rockchip_pcie_write(rockchip, ob_addr_1,
+			    PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+	rockchip_pcie_write(rockchip, ob_desc_0,
+			    PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+	rockchip_pcie_write(rockchip, 0,
+			    PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+	return 0;
+}
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+				     int region_no, u8 num_pass_bits,
+				     u32 lower_addr, u32 upper_addr)
+{
+	u32 ib_addr_0;
+	u32 ib_addr_1;
+	u32 aw_offset;
+
+	if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+		return -EINVAL;
+	if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+		return -EINVAL;
+	if (num_pass_bits > 63)
+		return -EINVAL;
+
+	aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+	ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+	ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+	ib_addr_1 = upper_addr;
+
+	rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+	rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+	return 0;
+}
+
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int offset;
+	int err;
+	int reg_no;
+
+	rockchip_pcie_cfg_configuration_accesses(rockchip,
+						 AXI_WRAPPER_TYPE0_CFG);
+
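+	/*
+	 * Outbound regions other than region 0 are 1 MiB each, so carve
+	 * the MEM window into (mem_size >> 20) regions, then the I/O
+	 * window likewise, and finally reserve one region for messages.
+	 */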
+	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+						AXI_WRAPPER_MEM_WRITE,
+						20 - 1,
+						rockchip->mem_bus_addr +
+						(reg_no << 20),
+						0);
+		if (err) {
+			dev_err(dev, "program RC mem outbound ATU failed\n");
+			return err;
+		}
+	}
+
+	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+	if (err) {
+		dev_err(dev, "program RC mem inbound ATU failed\n");
+		return err;
+	}
+
+	offset = rockchip->mem_size >> 20;
+	for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+		err = rockchip_pcie_prog_ob_atu(rockchip,
+						reg_no + 1 + offset,
+						AXI_WRAPPER_IO_WRITE,
+						20 - 1,
+						rockchip->io_bus_addr +
+						(reg_no << 20),
+						0);
+		if (err) {
+			dev_err(dev, "program RC io outbound ATU failed\n");
+			return err;
+		}
+	}
+
+	/* assign message regions */
+	rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
+				  AXI_WRAPPER_NOR_MSG,
+				  20 - 1, 0, 0);
+
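+	/*
+	 * The message region occupies the outbound slot just past the MEM
+	 * and I/O windows.
+	 */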
+	rockchip->msg_bus_addr = rockchip->mem_bus_addr +
+					((reg_no + offset) << 20);
+	return err;
+}
+
+static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
+{
+	u32 value;
+	int err;
+
+	/* send PME_TURN_OFF message */
+	writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);
+
+	/* read LTSSM and wait for falling into L2 link state */
+	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
+				 value, PCIE_LINK_IS_L2(value), 20,
+				 jiffies_to_usecs(5 * HZ));
+	if (err) {
+		dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
+{
+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+	int ret;
+
+	/* Disable core and client interrupts; the PME_ACK message needs no ack */
+	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
+			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
+	rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);
+
+	ret = rockchip_pcie_wait_l2(rockchip);
+	if (ret) {
+		rockchip_pcie_enable_interrupts(rockchip);
+		return ret;
+	}
+
+	rockchip_pcie_deinit_phys(rockchip);
+
+	rockchip_pcie_disable_clocks(rockchip);
+
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+
+	return ret;
+}
+
+static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
+{
+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+	int err;
+
+	if (!IS_ERR(rockchip->vpcie0v9)) {
+		err = regulator_enable(rockchip->vpcie0v9);
+		if (err) {
+			dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+			return err;
+		}
+	}
+
+	err = rockchip_pcie_enable_clocks(rockchip);
+	if (err)
+		goto err_disable_0v9;
+
+	err = rockchip_pcie_host_init_port(rockchip);
+	if (err)
+		goto err_pcie_resume;
+
+	err = rockchip_pcie_cfg_atu(rockchip);
+	if (err)
+		goto err_deinit_port;
+
+	/* Need this to enter L1 again */
+	rockchip_pcie_update_txcredit_mui(rockchip);
+	rockchip_pcie_enable_interrupts(rockchip);
+
+	return 0;
+
+err_deinit_port:
+	rockchip_pcie_deinit_phys(rockchip);
+err_pcie_resume:
+	rockchip_pcie_disable_clocks(rockchip);
+err_disable_0v9:
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+	return err;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+	struct rockchip_pcie *rockchip;
+	struct device *dev = &pdev->dev;
+	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
+	struct resource_entry *win;
+	resource_size_t io_base;
+	struct resource	*mem;
+	struct resource	*io;
+	int err;
+
+	LIST_HEAD(res);
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+	if (!bridge)
+		return -ENOMEM;
+
+	rockchip = pci_host_bridge_priv(bridge);
+
+	platform_set_drvdata(pdev, rockchip);
+	rockchip->dev = dev;
+	rockchip->is_rc = true;
+
+	err = rockchip_pcie_parse_host_dt(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_enable_clocks(rockchip);
+	if (err)
+		return err;
+
+	err = rockchip_pcie_set_vpcie(rockchip);
+	if (err) {
+		dev_err(dev, "failed to set vpcie regulator\n");
+		goto err_set_vpcie;
+	}
+
+	err = rockchip_pcie_host_init_port(rockchip);
+	if (err)
+		goto err_vpcie;
+
+	rockchip_pcie_enable_interrupts(rockchip);
+
+	err = rockchip_pcie_init_irq_domain(rockchip);
+	if (err < 0)
+		goto err_deinit_port;
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
+						    &res, &io_base);
+	if (err)
+		goto err_remove_irq_domain;
+
+	err = devm_request_pci_bus_resources(dev, &res);
+	if (err)
+		goto err_free_res;
+
+	/* Get the I/O and memory ranges from DT */
+	resource_list_for_each_entry(win, &res) {
+		switch (resource_type(win->res)) {
+		case IORESOURCE_IO:
+			io = win->res;
+			io->name = "I/O";
+			rockchip->io_size = resource_size(io);
+			rockchip->io_bus_addr = io->start - win->offset;
+			err = pci_remap_iospace(io, io_base);
+			if (err) {
+				dev_warn(dev, "error %d: failed to map resource %pR\n",
+					 err, io);
+				continue;
+			}
+			rockchip->io = io;
+			break;
+		case IORESOURCE_MEM:
+			mem = win->res;
+			mem->name = "MEM";
+			rockchip->mem_size = resource_size(mem);
+			rockchip->mem_bus_addr = mem->start - win->offset;
+			break;
+		case IORESOURCE_BUS:
+			rockchip->root_bus_nr = win->res->start;
+			break;
+		default:
+			continue;
+		}
+	}
+
+	err = rockchip_pcie_cfg_atu(rockchip);
+	if (err)
+		goto err_unmap_iospace;
+
+	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
+	if (!rockchip->msg_region) {
+		err = -ENOMEM;
+		goto err_unmap_iospace;
+	}
+
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = rockchip;
+	bridge->busnr = 0;
+	bridge->ops = &rockchip_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
+		goto err_unmap_iospace;
+
+	bus = bridge->bus;
+
+	rockchip->root_bus = bus;
+
+	pci_bus_size_bridges(bus);
+	pci_bus_assign_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(bus);
+	return 0;
+
+err_unmap_iospace:
+	pci_unmap_iospace(rockchip->io);
+err_free_res:
+	pci_free_resource_list(&res);
+err_remove_irq_domain:
+	irq_domain_remove(rockchip->irq_domain);
+err_deinit_port:
+	rockchip_pcie_deinit_phys(rockchip);
+err_vpcie:
+	if (!IS_ERR(rockchip->vpcie12v))
+		regulator_disable(rockchip->vpcie12v);
+	if (!IS_ERR(rockchip->vpcie3v3))
+		regulator_disable(rockchip->vpcie3v3);
+	if (!IS_ERR(rockchip->vpcie1v8))
+		regulator_disable(rockchip->vpcie1v8);
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+	rockchip_pcie_disable_clocks(rockchip);
+	return err;
+}
+
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+	pci_stop_root_bus(rockchip->root_bus);
+	pci_remove_root_bus(rockchip->root_bus);
+	pci_unmap_iospace(rockchip->io);
+	irq_domain_remove(rockchip->irq_domain);
+
+	rockchip_pcie_deinit_phys(rockchip);
+
+	rockchip_pcie_disable_clocks(rockchip);
+
+	if (!IS_ERR(rockchip->vpcie12v))
+		regulator_disable(rockchip->vpcie12v);
+	if (!IS_ERR(rockchip->vpcie3v3))
+		regulator_disable(rockchip->vpcie3v3);
+	if (!IS_ERR(rockchip->vpcie1v8))
+		regulator_disable(rockchip->vpcie1v8);
+	if (!IS_ERR(rockchip->vpcie0v9))
+		regulator_disable(rockchip->vpcie0v9);
+
+	return 0;
+}
+
+static const struct dev_pm_ops rockchip_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
+				      rockchip_pcie_resume_noirq)
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+	{ .compatible = "rockchip,rk3399-pcie", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
+
+static struct platform_driver rockchip_pcie_driver = {
+	.driver = {
+		.name = "rockchip-pcie",
+		.of_match_table = rockchip_pcie_of_match,
+		.pm = &rockchip_pcie_pm_ops,
+	},
+	.probe = rockchip_pcie_probe,
+	.remove = rockchip_pcie_remove,
+};
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/marvell/linux/drivers/pci/controller/pcie-rockchip.c b/marvell/linux/drivers/pci/controller/pcie-rockchip.c
new file mode 100644
index 0000000..6ab7ca0
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-rockchip.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *         Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "../pci.h"
+#include "pcie-rockchip.h"
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *node = dev->of_node;
+	struct resource *regs;
+	int err;
+
+	if (rockchip->is_rc) {
+		regs = platform_get_resource_byname(pdev,
+						    IORESOURCE_MEM,
+						    "axi-base");
+		rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
+		if (IS_ERR(rockchip->reg_base))
+			return PTR_ERR(rockchip->reg_base);
+	} else {
+		rockchip->mem_res =
+			platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						     "mem-base");
+		if (!rockchip->mem_res)
+			return -EINVAL;
+	}
+
+	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					    "apb-base");
+	rockchip->apb_base = devm_ioremap_resource(dev, regs);
+	if (IS_ERR(rockchip->apb_base))
+		return PTR_ERR(rockchip->apb_base);
+
+	err = rockchip_pcie_get_phys(rockchip);
+	if (err)
+		return err;
+
+	rockchip->lanes = 1;
+	err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+	if (!err && (rockchip->lanes == 0 ||
+		     rockchip->lanes == 3 ||
+		     rockchip->lanes > 4)) {
+		dev_warn(dev, "invalid num-lanes, defaulting to one lane\n");
+		rockchip->lanes = 1;
+	}
+
+	rockchip->link_gen = of_pci_get_max_link_speed(node);
+	if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+		rockchip->link_gen = 2;
+
+	rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
+	if (IS_ERR(rockchip->core_rst)) {
+		if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing core reset property in node\n");
+		return PTR_ERR(rockchip->core_rst);
+	}
+
+	rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
+	if (IS_ERR(rockchip->mgmt_rst)) {
+		if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing mgmt reset property in node\n");
+		return PTR_ERR(rockchip->mgmt_rst);
+	}
+
+	rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+								"mgmt-sticky");
+	if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+		if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing mgmt-sticky reset property in node\n");
+		return PTR_ERR(rockchip->mgmt_sticky_rst);
+	}
+
+	rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
+	if (IS_ERR(rockchip->pipe_rst)) {
+		if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pipe reset property in node\n");
+		return PTR_ERR(rockchip->pipe_rst);
+	}
+
+	rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
+	if (IS_ERR(rockchip->pm_rst)) {
+		if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pm reset property in node\n");
+		return PTR_ERR(rockchip->pm_rst);
+	}
+
+	rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
+	if (IS_ERR(rockchip->pclk_rst)) {
+		if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing pclk reset property in node\n");
+		return PTR_ERR(rockchip->pclk_rst);
+	}
+
+	rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
+	if (IS_ERR(rockchip->aclk_rst)) {
+		if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+			dev_err(dev, "missing aclk reset property in node\n");
+		return PTR_ERR(rockchip->aclk_rst);
+	}
+
+	if (rockchip->is_rc) {
+		rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
+							    GPIOD_OUT_LOW);
+		if (IS_ERR(rockchip->ep_gpio))
+			return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
+					     "failed to get ep GPIO\n");
+	}
+
+	rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+	if (IS_ERR(rockchip->aclk_pcie)) {
+		dev_err(dev, "aclk clock not found\n");
+		return PTR_ERR(rockchip->aclk_pcie);
+	}
+
+	rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+	if (IS_ERR(rockchip->aclk_perf_pcie)) {
+		dev_err(dev, "aclk_perf clock not found\n");
+		return PTR_ERR(rockchip->aclk_perf_pcie);
+	}
+
+	rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+	if (IS_ERR(rockchip->hclk_pcie)) {
+		dev_err(dev, "hclk clock not found\n");
+		return PTR_ERR(rockchip->hclk_pcie);
+	}
+
+	rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+	if (IS_ERR(rockchip->clk_pcie_pm)) {
+		dev_err(dev, "pm clock not found\n");
+		return PTR_ERR(rockchip->clk_pcie_pm);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt);
+
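+/*
+ * readx_poll_timeout() takes a single-argument accessor, so wrap
+ * rockchip_pcie_read() and pick up "rockchip" from the caller's scope.
+ */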
+#define rockchip_pcie_read_addr(addr) rockchip_pcie_read(rockchip, addr)
+/* 100 ms max wait time for PHY PLLs to lock */
+#define RK_PHY_PLL_LOCK_TIMEOUT_US 100000
+/* Sleep should be less than 20ms */
+#define RK_PHY_PLL_LOCK_SLEEP_US 1000
+
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err, i;
+	u32 regs;
+
+	err = reset_control_assert(rockchip->aclk_rst);
+	if (err) {
+		dev_err(dev, "assert aclk_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_assert(rockchip->pclk_rst);
+	if (err) {
+		dev_err(dev, "assert pclk_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_assert(rockchip->pm_rst);
+	if (err) {
+		dev_err(dev, "assert pm_rst err %d\n", err);
+		return err;
+	}
+
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		err = phy_init(rockchip->phys[i]);
+		if (err) {
+			dev_err(dev, "init phy%d err %d\n", i, err);
+			goto err_exit_phy;
+		}
+	}
+
+	err = reset_control_assert(rockchip->core_rst);
+	if (err) {
+		dev_err(dev, "assert core_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	err = reset_control_assert(rockchip->mgmt_rst);
+	if (err) {
+		dev_err(dev, "assert mgmt_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	err = reset_control_assert(rockchip->mgmt_sticky_rst);
+	if (err) {
+		dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	err = reset_control_assert(rockchip->pipe_rst);
+	if (err) {
+		dev_err(dev, "assert pipe_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	udelay(10);
+
+	err = reset_control_deassert(rockchip->pm_rst);
+	if (err) {
+		dev_err(dev, "deassert pm_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	err = reset_control_deassert(rockchip->aclk_rst);
+	if (err) {
+		dev_err(dev, "deassert aclk_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	err = reset_control_deassert(rockchip->pclk_rst);
+	if (err) {
+		dev_err(dev, "deassert pclk_rst err %d\n", err);
+		goto err_exit_phy;
+	}
+
+	if (rockchip->link_gen == 2)
+		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
+				    PCIE_CLIENT_CONFIG);
+	else
+		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
+				    PCIE_CLIENT_CONFIG);
+
+	regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+	       PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
+
+	if (rockchip->is_rc)
+		regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+	else
+		regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
+
+	rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG);
+
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		err = phy_power_on(rockchip->phys[i]);
+		if (err) {
+			dev_err(dev, "power on phy%d err %d\n", i, err);
+			goto err_power_off_phy;
+		}
+	}
+
+	err = readx_poll_timeout(rockchip_pcie_read_addr,
+				 PCIE_CLIENT_SIDE_BAND_STATUS,
+				 regs, !(regs & PCIE_CLIENT_PHY_ST),
+				 RK_PHY_PLL_LOCK_SLEEP_US,
+				 RK_PHY_PLL_LOCK_TIMEOUT_US);
+	if (err) {
+		dev_err(dev, "PHY PLLs could not lock, %d\n", err);
+		goto err_power_off_phy;
+	}
+
+	/*
+	 * Please don't reorder the deassert sequence of the following
+	 * four reset pins.
+	 */
+	err = reset_control_deassert(rockchip->mgmt_sticky_rst);
+	if (err) {
+		dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+		goto err_power_off_phy;
+	}
+
+	err = reset_control_deassert(rockchip->core_rst);
+	if (err) {
+		dev_err(dev, "deassert core_rst err %d\n", err);
+		goto err_power_off_phy;
+	}
+
+	err = reset_control_deassert(rockchip->mgmt_rst);
+	if (err) {
+		dev_err(dev, "deassert mgmt_rst err %d\n", err);
+		goto err_power_off_phy;
+	}
+
+	err = reset_control_deassert(rockchip->pipe_rst);
+	if (err) {
+		dev_err(dev, "deassert pipe_rst err %d\n", err);
+		goto err_power_off_phy;
+	}
+
+	return 0;
+err_power_off_phy:
+	while (i--)
+		phy_power_off(rockchip->phys[i]);
+	i = MAX_LANE_NUM;
+err_exit_phy:
+	while (i--)
+		phy_exit(rockchip->phys[i]);
+	return err;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_init_port);
+
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	struct phy *phy;
+	char *name;
+	u32 i;
+
+	phy = devm_phy_get(dev, "pcie-phy");
+	if (!IS_ERR(phy)) {
+		rockchip->legacy_phy = true;
+		rockchip->phys[0] = phy;
+		dev_warn(dev, "legacy phy model is deprecated!\n");
+		return 0;
+	}
+
+	if (PTR_ERR(phy) == -EPROBE_DEFER)
+		return PTR_ERR(phy);
+
+	dev_dbg(dev, "missing legacy PHY; searching for per-lane PHYs\n");
+
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i);
+		if (!name)
+			return -ENOMEM;
+
+		phy = devm_of_phy_get(dev, dev->of_node, name);
+		kfree(name);
+
+		if (IS_ERR(phy)) {
+			if (PTR_ERR(phy) != -EPROBE_DEFER)
+				dev_err(dev, "missing phy for lane %d: %ld\n",
+					i, PTR_ERR(phy));
+			return PTR_ERR(phy);
+		}
+
+		rockchip->phys[i] = phy;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys);
+
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip)
+{
+	int i;
+
+	for (i = 0; i < MAX_LANE_NUM; i++) {
+		/* inactive lanes are already powered off */
+		if (rockchip->lanes_map & BIT(i))
+			phy_power_off(rockchip->phys[i]);
+		phy_exit(rockchip->phys[i]);
+	}
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys);
+
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int err;
+
+	err = clk_prepare_enable(rockchip->aclk_pcie);
+	if (err) {
+		dev_err(dev, "unable to enable aclk_pcie clock\n");
+		return err;
+	}
+
+	err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+	if (err) {
+		dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
+		goto err_aclk_perf_pcie;
+	}
+
+	err = clk_prepare_enable(rockchip->hclk_pcie);
+	if (err) {
+		dev_err(dev, "unable to enable hclk_pcie clock\n");
+		goto err_hclk_pcie;
+	}
+
+	err = clk_prepare_enable(rockchip->clk_pcie_pm);
+	if (err) {
+		dev_err(dev, "unable to enable clk_pcie_pm clock\n");
+		goto err_clk_pcie_pm;
+	}
+
+	return 0;
+
+err_clk_pcie_pm:
+	clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+	clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+	clk_disable_unprepare(rockchip->aclk_pcie);
+	return err;
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
+
+void rockchip_pcie_disable_clocks(void *data)
+{
+	struct rockchip_pcie *rockchip = data;
+
+	clk_disable_unprepare(rockchip->clk_pcie_pm);
+	clk_disable_unprepare(rockchip->hclk_pcie);
+	clk_disable_unprepare(rockchip->aclk_perf_pcie);
+	clk_disable_unprepare(rockchip->aclk_pcie);
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
+
+void rockchip_pcie_cfg_configuration_accesses(
+		struct rockchip_pcie *rockchip, u32 type)
+{
+	u32 ob_desc_0;
+
+	/* Configuration Accesses for region 0 */
+	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+	rockchip_pcie_write(rockchip,
+			    (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+			    PCIE_CORE_OB_REGION_ADDR0);
+	rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+			    PCIE_CORE_OB_REGION_ADDR1);
+	ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+	ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+	ob_desc_0 |= (type | (0x1 << 23));
+	rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+	rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+}
+EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses);
diff --git a/marvell/linux/drivers/pci/controller/pcie-rockchip.h b/marvell/linux/drivers/pci/controller/pcie-rockchip.h
new file mode 100644
index 0000000..de24b04
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-rockchip.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Rockchip AXI PCIe controller driver
+ *
+ * Copyright (c) 2018 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ *
+ */
+
+#ifndef _PCIE_ROCKCHIP_H
+#define _PCIE_ROCKCHIP_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits.  This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x)			((((x) >> 1) & 3) << 4)
+#define MAX_LANE_NUM			4
+#define MAX_REGION_LIMIT		32
+#define MIN_EP_APERTURE			28
+
+#define PCIE_CLIENT_BASE		0x0
+#define PCIE_CLIENT_CONFIG		(PCIE_CLIENT_BASE + 0x00)
+#define   PCIE_CLIENT_CONF_ENABLE	  HIWORD_UPDATE_BIT(0x0001)
+#define   PCIE_CLIENT_CONF_DISABLE       HIWORD_UPDATE(0x0001, 0)
+#define   PCIE_CLIENT_LINK_TRAIN_ENABLE	  HIWORD_UPDATE_BIT(0x0002)
+#define   PCIE_CLIENT_ARI_ENABLE	  HIWORD_UPDATE_BIT(0x0008)
+#define   PCIE_CLIENT_CONF_LANE_NUM(x)	  HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define   PCIE_CLIENT_MODE_RC		  HIWORD_UPDATE_BIT(0x0040)
+#define   PCIE_CLIENT_MODE_EP            HIWORD_UPDATE(0x0040, 0)
+#define   PCIE_CLIENT_GEN_SEL_1		  HIWORD_UPDATE(0x0080, 0)
+#define   PCIE_CLIENT_GEN_SEL_2		  HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_LEGACY_INT_CTRL	(PCIE_CLIENT_BASE + 0x0c)
+#define   PCIE_CLIENT_INT_IN_ASSERT		HIWORD_UPDATE_BIT(0x0002)
+#define   PCIE_CLIENT_INT_IN_DEASSERT		HIWORD_UPDATE(0x0002, 0)
+#define   PCIE_CLIENT_INT_PEND_ST_PEND		HIWORD_UPDATE_BIT(0x0001)
+#define   PCIE_CLIENT_INT_PEND_ST_NORMAL	HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_SIDE_BAND_STATUS	(PCIE_CLIENT_BASE + 0x20)
+#define   PCIE_CLIENT_PHY_ST			BIT(12)
+#define PCIE_CLIENT_DEBUG_OUT_0		(PCIE_CLIENT_BASE + 0x3c)
+#define   PCIE_CLIENT_DEBUG_LTSSM_MASK		GENMASK(5, 0)
+#define   PCIE_CLIENT_DEBUG_LTSSM_L1		0x18
+#define   PCIE_CLIENT_DEBUG_LTSSM_L2		0x19
+#define PCIE_CLIENT_BASIC_STATUS1	(PCIE_CLIENT_BASE + 0x48)
+#define   PCIE_CLIENT_LINK_STATUS_UP		0x00300000
+#define   PCIE_CLIENT_LINK_STATUS_MASK		0x00300000
+#define PCIE_CLIENT_INT_MASK		(PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS		(PCIE_CLIENT_BASE + 0x50)
+#define   PCIE_CLIENT_INTR_MASK			GENMASK(8, 5)
+#define   PCIE_CLIENT_INTR_SHIFT		5
+#define   PCIE_CLIENT_INT_LEGACY_DONE		BIT(15)
+#define   PCIE_CLIENT_INT_MSG			BIT(14)
+#define   PCIE_CLIENT_INT_HOT_RST		BIT(13)
+#define   PCIE_CLIENT_INT_DPA			BIT(12)
+#define   PCIE_CLIENT_INT_FATAL_ERR		BIT(11)
+#define   PCIE_CLIENT_INT_NFATAL_ERR		BIT(10)
+#define   PCIE_CLIENT_INT_CORR_ERR		BIT(9)
+#define   PCIE_CLIENT_INT_INTD			BIT(8)
+#define   PCIE_CLIENT_INT_INTC			BIT(7)
+#define   PCIE_CLIENT_INT_INTB			BIT(6)
+#define   PCIE_CLIENT_INT_INTA			BIT(5)
+#define   PCIE_CLIENT_INT_LOCAL			BIT(4)
+#define   PCIE_CLIENT_INT_UDMA			BIT(3)
+#define   PCIE_CLIENT_INT_PHY			BIT(2)
+#define   PCIE_CLIENT_INT_HOT_PLUG		BIT(1)
+#define   PCIE_CLIENT_INT_PWR_STCG		BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+	(PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+	PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+	(PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+	PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+	PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+	PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+	PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE	0x900000
+#define PCIE_CORE_CTRL			(PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define   PCIE_CORE_PL_CONF_SPEED_5G		0x00000008
+#define   PCIE_CORE_PL_CONF_SPEED_MASK		0x00000018
+#define   PCIE_CORE_PL_CONF_LANE_MASK		0x00000006
+#define   PCIE_CORE_PL_CONF_LANE_SHIFT		1
+#define PCIE_CORE_CTRL_PLC1		(PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define   PCIE_CORE_CTRL_PLC1_FTS_MASK		GENMASK(23, 8)
+#define   PCIE_CORE_CTRL_PLC1_FTS_SHIFT		8
+#define   PCIE_CORE_CTRL_PLC1_FTS_CNT		0xffff
+#define PCIE_CORE_TXCREDIT_CFG1		(PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define   PCIE_CORE_TXCREDIT_CFG1_MUI_MASK	0xFFFF0000
+#define   PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT	16
+#define   PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+		(((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_LANE_MAP             (PCIE_CORE_CTRL_MGMT_BASE + 0x200)
+#define   PCIE_CORE_LANE_MAP_MASK              0x0000000f
+#define   PCIE_CORE_LANE_MAP_REVERSE           BIT(16)
+#define PCIE_CORE_INT_STATUS		(PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define   PCIE_CORE_INT_PRFPE			BIT(0)
+#define   PCIE_CORE_INT_CRFPE			BIT(1)
+#define   PCIE_CORE_INT_RRPE			BIT(2)
+#define   PCIE_CORE_INT_PRFO			BIT(3)
+#define   PCIE_CORE_INT_CRFO			BIT(4)
+#define   PCIE_CORE_INT_RT			BIT(5)
+#define   PCIE_CORE_INT_RTR			BIT(6)
+#define   PCIE_CORE_INT_PE			BIT(7)
+#define   PCIE_CORE_INT_MTR			BIT(8)
+#define   PCIE_CORE_INT_UCR			BIT(9)
+#define   PCIE_CORE_INT_FCE			BIT(10)
+#define   PCIE_CORE_INT_CT			BIT(11)
+#define   PCIE_CORE_INT_UTC			BIT(18)
+#define   PCIE_CORE_INT_MMVC			BIT(19)
+#define PCIE_CORE_CONFIG_VENDOR		(PCIE_CORE_CTRL_MGMT_BASE + 0x44)
+#define PCIE_CORE_INT_MASK		(PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_CORE_PHY_FUNC_CFG		(PCIE_CORE_CTRL_MGMT_BASE + 0x2c0)
+#define PCIE_RC_BAR_CONF		(PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED		0x0
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS		0x1
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS		0x4
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS		0x6
+#define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
+
+#define PCIE_CORE_INT \
+		(PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+		 PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+		 PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+		 PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+		 PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+		 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+		 PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_RP_ATS_BASE		0x400000
+#define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
+#define PCIE_RC_CONFIG_BASE		0xa00000
+#define PCIE_EP_CONFIG_BASE		0xa00000
+#define PCIE_EP_CONFIG_DID_VID		(PCIE_EP_CONFIG_BASE + 0x00)
+#define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
+#define   PCIE_RC_CONFIG_SCC_SHIFT		16
+#define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
+#define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
+#define   PCIE_RC_CONFIG_DCR_CSPL_LIMIT		0xff
+#define   PCIE_RC_CONFIG_DCR_CPLS_SHIFT		26
+#define PCIE_RC_CONFIG_DCSR		(PCIE_RC_CONFIG_BASE + 0xc8)
+#define   PCIE_RC_CONFIG_DCSR_MPS_MASK		GENMASK(7, 5)
+#define   PCIE_RC_CONFIG_DCSR_MPS_256		(0x1 << 5)
+#define PCIE_RC_CONFIG_LINK_CAP		(PCIE_RC_CONFIG_BASE + 0xcc)
+#define   PCIE_RC_CONFIG_LINK_CAP_L0S		BIT(10)
+#define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+#define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
+
+#define PCIE_CORE_AXI_CONF_BASE		0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define   PCIE_CORE_OB_REGION_ADDR0_NUM_BITS	0x3f
+#define   PCIE_CORE_OB_REGION_ADDR0_LO_ADDR	0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1	(PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0	(PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1	(PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE	0xc00800
+#define PCIE_RP_IB_ADDR0		(PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define   PCIE_CORE_IB_REGION_ADDR0_NUM_BITS	0x3f
+#define   PCIE_CORE_IB_REGION_ADDR0_LO_ADDR	0xffffff00
+#define PCIE_RP_IB_ADDR1		(PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE				BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE			(32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT			5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT		3
+#define AXI_WRAPPER_IO_WRITE			0x6
+#define AXI_WRAPPER_MEM_WRITE			0x2
+#define AXI_WRAPPER_TYPE0_CFG			0xa
+#define AXI_WRAPPER_TYPE1_CFG			0xb
+#define AXI_WRAPPER_NOR_MSG			0xc
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM		3
+#define MIN_AXI_ADDR_BITS_PASSED		8
+#define PCIE_RC_SEND_PME_OFF			0x11960
+#define ROCKCHIP_VENDOR_ID			0x1d87
+#define PCIE_ECAM_BUS(x)			(((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x)			(((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x)			(((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x)			(((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+	  (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+	   PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
+#define PCIE_LINK_IS_L2(x) \
+	(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_UP(x) \
+	(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
+#define PCIE_LINK_IS_GEN2(x) \
+	(((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G)
+
+#define RC_REGION_0_ADDR_TRANS_H		0x00000000
+#define RC_REGION_0_ADDR_TRANS_L		0x00000000
+#define RC_REGION_0_PASS_BITS			(25 - 1)
+#define RC_REGION_0_TYPE_MASK			GENMASK(3, 0)
+#define MAX_AXI_WRAPPER_REGION_NUM		33
+
+#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC		0x0
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR		0x1
+#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID		0x2
+#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST		0x3
+#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX		0x4
+#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK		0x5
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA		0x20
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB		0x21
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC		0x22
+#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD		0x23
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA		0x24
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB		0x25
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC		0x26
+#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD		0x27
+#define ROCKCHIP_PCIE_MSG_ROUTING_MASK			GENMASK(7, 5)
+#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
+	(((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
+#define ROCKCHIP_PCIE_MSG_CODE_MASK			GENMASK(15, 8)
+#define ROCKCHIP_PCIE_MSG_CODE(code) \
+	(((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK)
+#define ROCKCHIP_PCIE_MSG_NO_DATA			BIT(16)
+
+#define ROCKCHIP_PCIE_EP_CMD_STATUS			0x4
+#define   ROCKCHIP_PCIE_EP_CMD_STATUS_IS		BIT(19)
+#define ROCKCHIP_PCIE_EP_MSI_CTRL_REG			0x90
+#define   ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET		16
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET		17
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK		GENMASK(19, 17)
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET		20
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK		GENMASK(22, 20)
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_ME				BIT(16)
+#define   ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP	BIT(24)
+#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR				0x1
+#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
+
+#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS  8
+#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS  20
+
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+	(PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+	(PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+	(PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+	(((devfn) << 12) & \
+		 ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+		(((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+		(((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)	\
+		(PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+		(PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \
+		(PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \
+		(PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008)
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+		(GENMASK(4, 0) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+		(((a) << ((b) * 8)) & \
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+		(GENMASK(7, 5) << ((b) * 8))
+#define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+		(((c) << ((b) * 8 + 5)) & \
+		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+struct rockchip_pcie {
+	void	__iomem *reg_base;		/* DT axi-base */
+	void	__iomem *apb_base;		/* DT apb-base */
+	bool    legacy_phy;
+	struct  phy *phys[MAX_LANE_NUM];
+	struct	reset_control *core_rst;
+	struct	reset_control *mgmt_rst;
+	struct	reset_control *mgmt_sticky_rst;
+	struct	reset_control *pipe_rst;
+	struct	reset_control *pm_rst;
+	struct	reset_control *aclk_rst;
+	struct	reset_control *pclk_rst;
+	struct	clk *aclk_pcie;
+	struct	clk *aclk_perf_pcie;
+	struct	clk *hclk_pcie;
+	struct	clk *clk_pcie_pm;
+	struct	regulator *vpcie12v; /* 12V power supply */
+	struct	regulator *vpcie3v3; /* 3.3V power supply */
+	struct	regulator *vpcie1v8; /* 1.8V power supply */
+	struct	regulator *vpcie0v9; /* 0.9V power supply */
+	struct	gpio_desc *ep_gpio;
+	u32	lanes;
+	u8      lanes_map;
+	u8	root_bus_nr;
+	int	link_gen;
+	struct	device *dev;
+	struct	irq_domain *irq_domain;
+	int     offset;
+	struct pci_bus *root_bus;
+	struct resource *io;
+	phys_addr_t io_bus_addr;
+	u32     io_size;
+	void    __iomem *msg_region;
+	u32     mem_size;
+	phys_addr_t msg_bus_addr;
+	phys_addr_t mem_bus_addr;
+	bool is_rc;
+	struct resource *mem_res;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+	return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+				u32 reg)
+{
+	writel(val, rockchip->apb_base + reg);
+}
+
+int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip);
+int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
+int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
+void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
+int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
+void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_cfg_configuration_accesses(
+		struct rockchip_pcie *rockchip, u32 type);
+
+#endif /* _PCIE_ROCKCHIP_H */
diff --git a/marvell/linux/drivers/pci/controller/pcie-tango.c b/marvell/linux/drivers/pci/controller/pcie-tango.c
new file mode 100644
index 0000000..21a208d
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-tango.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pci-ecam.h>
+#include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+
+#define MSI_MAX			256
+
+#define SMP8759_MUX		0x48
+#define SMP8759_TEST_OUT	0x74
+#define SMP8759_DOORBELL	0x7c
+#define SMP8759_STATUS		0x80
+#define SMP8759_ENABLE		0xa0
+
+struct tango_pcie {
+	DECLARE_BITMAP(used_msi, MSI_MAX);
+	u64			msi_doorbell;
+	spinlock_t		used_msi_lock;
+	void __iomem		*base;
+	struct irq_domain	*dom;
+};
+
+static void tango_msi_isr(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
+	unsigned long status, base, virq, idx, pos = 0;
+
+	chained_irq_enter(chip, desc);
+	spin_lock(&pcie->used_msi_lock);
+
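+	/*
+	 * Status bits are banked 32 per register; only scan banks that
+	 * have at least one allocated MSI.
+	 */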
+	while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
+		base = round_down(pos, 32);
+		status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
+		for_each_set_bit(idx, &status, 32) {
+			virq = irq_find_mapping(pcie->dom, base + idx);
+			generic_handle_irq(virq);
+		}
+		pos = base + 32;
+	}
+
+	spin_unlock(&pcie->used_msi_lock);
+	chained_irq_exit(chip, desc);
+}
+
+static void tango_ack(struct irq_data *d)
+{
+	struct tango_pcie *pcie = d->chip_data;
+	u32 offset = (d->hwirq / 32) * 4;
+	u32 bit = BIT(d->hwirq % 32);
+
+	writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
+}
+
+static void update_msi_enable(struct irq_data *d, bool unmask)
+{
+	unsigned long flags;
+	struct tango_pcie *pcie = d->chip_data;
+	u32 offset = (d->hwirq / 32) * 4;
+	u32 bit = BIT(d->hwirq % 32);
+	u32 val;
+
+	spin_lock_irqsave(&pcie->used_msi_lock, flags);
+	val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset);
+	val = unmask ? val | bit : val & ~bit;
+	writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset);
+	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static void tango_mask(struct irq_data *d)
+{
+	update_msi_enable(d, false);
+}
+
+static void tango_unmask(struct irq_data *d)
+{
+	update_msi_enable(d, true);
+}
+
+static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
+			      bool force)
+{
+	return -EINVAL;
+}
+
+static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct tango_pcie *pcie = d->chip_data;
+	msg->address_lo = lower_32_bits(pcie->msi_doorbell);
+	msg->address_hi = upper_32_bits(pcie->msi_doorbell);
+	msg->data = d->hwirq;
+}
+
+static struct irq_chip tango_chip = {
+	.irq_ack		= tango_ack,
+	.irq_mask		= tango_mask,
+	.irq_unmask		= tango_unmask,
+	.irq_set_affinity	= tango_set_affinity,
+	.irq_compose_msi_msg	= tango_compose_msi_msg,
+};
+
+static void msi_ack(struct irq_data *d)
+{
+	irq_chip_ack_parent(d);
+}
+
+static void msi_mask(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void msi_unmask(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip msi_chip = {
+	.name = "MSI",
+	.irq_ack = msi_ack,
+	.irq_mask = msi_mask,
+	.irq_unmask = msi_unmask,
+};
+
+static struct msi_domain_info msi_dom_info = {
+	.flags	= MSI_FLAG_PCI_MSIX
+		| MSI_FLAG_USE_DEF_DOM_OPS
+		| MSI_FLAG_USE_DEF_CHIP_OPS,
+	.chip	= &msi_chip,
+};
+
+static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
+				  unsigned int nr_irqs, void *args)
+{
+	struct tango_pcie *pcie = dom->host_data;
+	unsigned long flags;
+	int pos;
+
+	spin_lock_irqsave(&pcie->used_msi_lock, flags);
+	pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
+	if (pos >= MSI_MAX) {
+		spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+		return -ENOSPC;
+	}
+	__set_bit(pos, pcie->used_msi);
+	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+	irq_domain_set_info(dom, virq, pos, &tango_chip,
+			pcie, handle_edge_irq, NULL, NULL);
+
+	return 0;
+}
+
+static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
+				  unsigned int nr_irqs)
+{
+	unsigned long flags;
+	struct irq_data *d = irq_domain_get_irq_data(dom, virq);
+	struct tango_pcie *pcie = d->chip_data;
+
+	spin_lock_irqsave(&pcie->used_msi_lock, flags);
+	__clear_bit(d->hwirq, pcie->used_msi);
+	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static const struct irq_domain_ops dom_ops = {
+	.alloc	= tango_irq_domain_alloc,
+	.free	= tango_irq_domain_free,
+};
+
+static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
+			       int where, int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+	int ret;
+
+	/* Reads in configuration space outside devfn 0 return garbage */
+	if (devfn != 0)
+		return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+	/*
+	 * PCI config and MMIO accesses are muxed.  Linux doesn't have a
+	 * mutual exclusion mechanism for config vs. MMIO accesses, so
+	 * concurrent accesses may cause corruption.
+	 */
+	writel_relaxed(1, pcie->base + SMP8759_MUX);
+	ret = pci_generic_config_read(bus, devfn, where, size, val);
+	writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+	return ret;
+}
+
+static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
+				int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+	int ret;
+
+	writel_relaxed(1, pcie->base + SMP8759_MUX);
+	ret = pci_generic_config_write(bus, devfn, where, size, val);
+	writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+	return ret;
+}
+
+static struct pci_ecam_ops smp8759_ecam_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= smp8759_config_read,
+		.write		= smp8759_config_write,
+	}
+};
+
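+/*
+ * Poll the LTSSM state via the TEST_OUT debug register.  The value 16
+ * appears to select the link-state observation set; the state is then
+ * read back from bits [12:8], where 0xf denotes L0 (link up).
+ */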
+static int tango_pcie_link_up(struct tango_pcie *pcie)
+{
+	void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
+	int i;
+
+	writel_relaxed(16, test_out);
+	for (i = 0; i < 10; ++i) {
+		u32 ltssm_state = readl_relaxed(test_out) >> 8;
+		if ((ltssm_state & 0x1f) == 0xf) /* L0 */
+			return 1;
+		usleep_range(3000, 4000);
+	}
+
+	return 0;
+}
+
+static int tango_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tango_pcie *pcie;
+	struct resource *res;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+	struct irq_domain *msi_dom, *irq_dom;
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	int virq, offset;
+
+	dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
+	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	pcie->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+	platform_set_drvdata(pdev, pcie);
+
+	if (!tango_pcie_link_up(pcie))
+		return -ENODEV;
+
+	if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
+		return -ENOENT;
+
+	if (of_pci_range_parser_one(&parser, &range) == NULL)
+		return -ENOENT;
+
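+	/*
+	 * The doorbell is placed just past the advertised dma-range, so an
+	 * endpoint MSI write lands on the SMP8759_DOORBELL register rather
+	 * than on RAM (inferred from the bus-address arithmetic below).
+	 */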
+	range.pci_addr += range.size;
+	pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;
+
+	for (offset = 0; offset < MSI_MAX / 8; offset += 4)
+		writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
+
+	virq = platform_get_irq(pdev, 1);
+	if (virq <= 0) {
+		dev_err(dev, "Failed to map IRQ\n");
+		return -ENXIO;
+	}
+
+	irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
+	if (!irq_dom) {
+		dev_err(dev, "Failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
+	if (!msi_dom) {
+		dev_err(dev, "Failed to create MSI domain\n");
+		irq_domain_remove(irq_dom);
+		return -ENOMEM;
+	}
+
+	pcie->dom = irq_dom;
+	spin_lock_init(&pcie->used_msi_lock);
+	irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
+
+	return pci_host_common_probe(pdev, &smp8759_ecam_ops);
+}
+
+static const struct of_device_id tango_pcie_ids[] = {
+	{ .compatible = "sigma,smp8759-pcie" },
+	{ },
+};
+
+static struct platform_driver tango_pcie_driver = {
+	.probe	= tango_pcie_probe,
+	.driver	= {
+		.name = KBUILD_MODNAME,
+		.of_match_table = tango_pcie_ids,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(tango_pcie_driver);
+
+/*
+ * The root complex advertises the wrong device class.
+ * Header Type 1 is for PCI-to-PCI bridges.
+ */
+static void tango_fixup_class(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);
+
+/*
+ * The root complex exposes a "fake" BAR, which is used to filter
+ * bus-to-system accesses.  Only accesses within the range defined by this
+ * BAR are forwarded to the host, others are ignored.
+ *
+ * By default, the DMA framework expects an identity mapping, and DRAM0 is
+ * mapped at 0x80000000.
+ */
+static void tango_fixup_bar(struct pci_dev *dev)
+{
+	dev->non_compliant_bars = true;
+	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);
diff --git a/marvell/linux/drivers/pci/controller/pcie-xilinx-nwl.c b/marvell/linux/drivers/pci/controller/pcie-xilinx-nwl.c
new file mode 100644
index 0000000..f08606f
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -0,0 +1,914 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for NWL PCIe Bridge
+ * Based on pcie-xilinx.c, pci-tegra.c
+ *
+ * (C) Copyright 2014 - 2015, Xilinx, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/irqchip/chained_irq.h>
+
+#include "../pci.h"
+
+/* Bridge core config registers */
+#define BRCFG_PCIE_RX0			0x00000000
+#define BRCFG_INTERRUPT			0x00000010
+#define BRCFG_PCIE_RX_MSG_FILTER	0x00000020
+
+/* Egress - Bridge translation registers */
+#define E_BREG_CAPABILITIES		0x00000200
+#define E_BREG_CONTROL			0x00000208
+#define E_BREG_BASE_LO			0x00000210
+#define E_BREG_BASE_HI			0x00000214
+#define E_ECAM_CAPABILITIES		0x00000220
+#define E_ECAM_CONTROL			0x00000228
+#define E_ECAM_BASE_LO			0x00000230
+#define E_ECAM_BASE_HI			0x00000234
+
+/* Ingress - address translations */
+#define I_MSII_CAPABILITIES		0x00000300
+#define I_MSII_CONTROL			0x00000308
+#define I_MSII_BASE_LO			0x00000310
+#define I_MSII_BASE_HI			0x00000314
+
+#define I_ISUB_CONTROL			0x000003E8
+#define SET_ISUB_CONTROL		BIT(0)
+/* Received message FIFO - interrupt status registers */
+#define MSGF_MISC_STATUS		0x00000400
+#define MSGF_MISC_MASK			0x00000404
+#define MSGF_LEG_STATUS			0x00000420
+#define MSGF_LEG_MASK			0x00000424
+#define MSGF_MSI_STATUS_LO		0x00000440
+#define MSGF_MSI_STATUS_HI		0x00000444
+#define MSGF_MSI_MASK_LO		0x00000448
+#define MSGF_MSI_MASK_HI		0x0000044C
+
+/* Msg filter mask bits */
+#define CFG_ENABLE_PM_MSG_FWD		BIT(1)
+#define CFG_ENABLE_INT_MSG_FWD		BIT(2)
+#define CFG_ENABLE_ERR_MSG_FWD		BIT(3)
+#define CFG_ENABLE_MSG_FILTER_MASK	(CFG_ENABLE_PM_MSG_FWD | \
+					CFG_ENABLE_INT_MSG_FWD | \
+					CFG_ENABLE_ERR_MSG_FWD)
+
+/* Misc interrupt status mask bits */
+#define MSGF_MISC_SR_RXMSG_AVAIL	BIT(0)
+#define MSGF_MISC_SR_RXMSG_OVER		BIT(1)
+#define MSGF_MISC_SR_SLAVE_ERR		BIT(4)
+#define MSGF_MISC_SR_MASTER_ERR		BIT(5)
+#define MSGF_MISC_SR_I_ADDR_ERR		BIT(6)
+#define MSGF_MISC_SR_E_ADDR_ERR		BIT(7)
+#define MSGF_MISC_SR_FATAL_AER		BIT(16)
+#define MSGF_MISC_SR_NON_FATAL_AER	BIT(17)
+#define MSGF_MISC_SR_CORR_AER		BIT(18)
+#define MSGF_MISC_SR_UR_DETECT		BIT(20)
+#define MSGF_MISC_SR_NON_FATAL_DEV	BIT(22)
+#define MSGF_MISC_SR_FATAL_DEV		BIT(23)
+#define MSGF_MISC_SR_LINK_DOWN		BIT(24)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH	BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH	BIT(26)
+
+#define MSGF_MISC_SR_MASKALL		(MSGF_MISC_SR_RXMSG_AVAIL | \
+					MSGF_MISC_SR_RXMSG_OVER | \
+					MSGF_MISC_SR_SLAVE_ERR | \
+					MSGF_MISC_SR_MASTER_ERR | \
+					MSGF_MISC_SR_I_ADDR_ERR | \
+					MSGF_MISC_SR_E_ADDR_ERR | \
+					MSGF_MISC_SR_FATAL_AER | \
+					MSGF_MISC_SR_NON_FATAL_AER | \
+					MSGF_MISC_SR_CORR_AER | \
+					MSGF_MISC_SR_UR_DETECT | \
+					MSGF_MISC_SR_NON_FATAL_DEV | \
+					MSGF_MISC_SR_FATAL_DEV | \
+					MSGF_MISC_SR_LINK_DOWN | \
+					MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+					MSGF_MISC_SR_LINK_BWIDTH)
+
+/* Legacy interrupt status mask bits */
+#define MSGF_LEG_SR_INTA		BIT(0)
+#define MSGF_LEG_SR_INTB		BIT(1)
+#define MSGF_LEG_SR_INTC		BIT(2)
+#define MSGF_LEG_SR_INTD		BIT(3)
+#define MSGF_LEG_SR_MASKALL		(MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
+					MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
+
+/* MSI interrupt status mask bits */
+#define MSGF_MSI_SR_LO_MASK		GENMASK(31, 0)
+#define MSGF_MSI_SR_HI_MASK		GENMASK(31, 0)
+
+#define MSII_PRESENT			BIT(0)
+#define MSII_ENABLE			BIT(0)
+#define MSII_STATUS_ENABLE		BIT(15)
+
+/* Bridge config interrupt mask */
+#define BRCFG_INTERRUPT_MASK		BIT(0)
+#define BREG_PRESENT			BIT(0)
+#define BREG_ENABLE			BIT(0)
+#define BREG_ENABLE_FORCE		BIT(1)
+
+/* E_ECAM status mask bits */
+#define E_ECAM_PRESENT			BIT(0)
+#define E_ECAM_CR_ENABLE		BIT(0)
+#define E_ECAM_SIZE_LOC			GENMASK(20, 16)
+#define E_ECAM_SIZE_SHIFT		16
+#define ECAM_BUS_LOC_SHIFT		20
+#define ECAM_DEV_LOC_SHIFT		12
+#define NWL_ECAM_VALUE_DEFAULT		12
+
+#define CFG_DMA_REG_BAR			GENMASK(2, 0)
+
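+/* 64 vectors, backed by the 32-bit MSGF_MSI_{STATUS,MASK}_{LO,HI} pairs */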
+#define INT_PCI_MSI_NR			(2 * 32)
+
+/* Reading the PS_LINKUP register */
+#define PS_LINKUP_OFFSET		0x00000238
+#define PCIE_PHY_LINKUP_BIT		BIT(0)
+#define PHY_RDY_LINKUP_BIT		BIT(1)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES          10
+#define LINK_WAIT_USLEEP_MIN           90000
+#define LINK_WAIT_USLEEP_MAX           100000
+
+struct nwl_msi {			/* MSI information */
+	struct irq_domain *msi_domain;
+	unsigned long *bitmap;
+	struct irq_domain *dev_domain;
+	struct mutex lock;		/* protect bitmap variable */
+	int irq_msi0;
+	int irq_msi1;
+};
+
+struct nwl_pcie {
+	struct device *dev;
+	void __iomem *breg_base;
+	void __iomem *pcireg_base;
+	void __iomem *ecam_base;
+	phys_addr_t phys_breg_base;	/* Physical Bridge Register Base */
+	phys_addr_t phys_pcie_reg_base;	/* Physical PCIe Controller Base */
+	phys_addr_t phys_ecam_base;	/* Physical Configuration Base */
+	u32 breg_size;
+	u32 pcie_reg_size;
+	u32 ecam_size;
+	int irq_intx;
+	int irq_misc;
+	u32 ecam_value;
+	u8 last_busno;
+	u8 root_busno;
+	struct nwl_msi msi;
+	struct irq_domain *legacy_irq_domain;
+	struct clk *clk;
+	raw_spinlock_t leg_mask_lock;
+};
+
+static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
+{
+	return readl(pcie->breg_base + off);
+}
+
+static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
+{
+	writel(val, pcie->breg_base + off);
+}
+
+static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
+{
+	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
+		return true;
+	return false;
+}
+
+static bool nwl_phy_link_up(struct nwl_pcie *pcie)
+{
+	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
+		return true;
+	return false;
+}
+
+static int nwl_wait_for_link(struct nwl_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	int retries;
+
+	/* check if the link is up or not */
+	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+		if (nwl_phy_link_up(pcie))
+			return 0;
+		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+	}
+
+	dev_err(dev, "PHY link never came up\n");
+	return -ETIMEDOUT;
+}
+
+static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct nwl_pcie *pcie = bus->sysdata;
+
+	/* Check link before accessing downstream ports */
+	if (bus->number != pcie->root_busno) {
+		if (!nwl_pcie_link_up(pcie))
+			return false;
+	}
+
+	/* Only one device down on each root port */
+	if (bus->number == pcie->root_busno && devfn > 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * nwl_pcie_map_bus - Get configuration base
+ *
+ * @bus: Bus structure of current bus
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space to be accessed.
+ */
+static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				      int where)
+{
+	struct nwl_pcie *pcie = bus->sysdata;
+	int relbus;
+
+	if (!nwl_pcie_valid_device(bus, devfn))
+		return NULL;
+
+	relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
+			(devfn << ECAM_DEV_LOC_SHIFT);
+
+	return pcie->ecam_base + relbus + where;
+}
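+
+/*
+ * Illustrative ECAM offset (hypothetical values): bus 1, device 1,
+ * function 0 (devfn 0x08) at register 0x10 maps to
+ * (1 << 20) | (0x08 << 12) | 0x10 = 0x108010 from ecam_base.
+ */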
+
+/* PCIe operations */
+static struct pci_ops nwl_pcie_ops = {
+	.map_bus = nwl_pcie_map_bus,
+	.read  = pci_generic_config_read,
+	.write = pci_generic_config_write,
+};
+
+static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
+{
+	struct nwl_pcie *pcie = data;
+	struct device *dev = pcie->dev;
+	u32 misc_stat;
+
+	/* Checking for misc interrupts */
+	misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
+				     MSGF_MISC_SR_MASKALL;
+	if (!misc_stat)
+		return IRQ_NONE;
+
+	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
+		dev_err(dev, "Received Message FIFO Overflow\n");
+
+	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
+		dev_err(dev, "Slave error\n");
+
+	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
+		dev_err(dev, "Master error\n");
+
+	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
+		dev_err(dev, "In Misc Ingress address translation error\n");
+
+	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
+		dev_err(dev, "In Misc Egress address translation error\n");
+
+	if (misc_stat & MSGF_MISC_SR_FATAL_AER)
+		dev_err(dev, "Fatal Error in AER Capability\n");
+
+	if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
+		dev_err(dev, "Non-Fatal Error in AER Capability\n");
+
+	if (misc_stat & MSGF_MISC_SR_CORR_AER)
+		dev_err(dev, "Correctable Error in AER Capability\n");
+
+	if (misc_stat & MSGF_MISC_SR_UR_DETECT)
+		dev_err(dev, "Unsupported request Detected\n");
+
+	if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
+		dev_err(dev, "Non-Fatal Error Detected\n");
+
+	if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
+		dev_err(dev, "Fatal Error Detected\n");
+
+	if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
+		dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
+
+	if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
+		dev_info(dev, "Link Bandwidth Management Status bit set\n");
+
+	/* Clear misc interrupt status */
+	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
+
+	return IRQ_HANDLED;
+}
+
+static void nwl_pcie_leg_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct nwl_pcie *pcie;
+	unsigned long status;
+	u32 bit;
+	u32 virq;
+
+	chained_irq_enter(chip, desc);
+	pcie = irq_desc_get_handler_data(desc);
+
+	while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
+				MSGF_LEG_SR_MASKALL) != 0) {
+		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+			virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
+{
+	struct nwl_msi *msi;
+	unsigned long status;
+	u32 bit;
+	u32 virq;
+
+	msi = &pcie->msi;
+
+	while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
+		for_each_set_bit(bit, &status, 32) {
+			nwl_bridge_writel(pcie, 1 << bit, status_reg);
+			virq = irq_find_mapping(msi->dev_domain, bit);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+	}
+}
+
+static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
+
+	chained_irq_enter(chip, desc);
+	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
+	chained_irq_exit(chip, desc);
+}
+
+static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
+
+	chained_irq_enter(chip, desc);
+	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
+	chained_irq_exit(chip, desc);
+}
+
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	mask = 1 << data->hwirq;
+	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+	unsigned long flags;
+	u32 mask;
+	u32 val;
+
+	mask = 1 << data->hwirq;
+	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+	.name = "nwl_pcie:legacy",
+	.irq_enable = nwl_unmask_leg_irq,
+	.irq_disable = nwl_mask_leg_irq,
+	.irq_mask = nwl_mask_leg_irq,
+	.irq_unmask = nwl_unmask_leg_irq,
+};
+
+static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
+			  irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	irq_set_status_flags(irq, IRQ_LEVEL);
+
+	return 0;
+}
+
+static const struct irq_domain_ops legacy_domain_ops = {
+	.map = nwl_legacy_map,
+	.xlate = pci_irqd_intx_xlate,
+};
+
+#ifdef CONFIG_PCI_MSI
+static struct irq_chip nwl_msi_irq_chip = {
+	.name = "nwl_pcie:msi",
+	.irq_enable = pci_msi_unmask_irq,
+	.irq_disable = pci_msi_mask_irq,
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info nwl_msi_domain_info = {
+	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		  MSI_FLAG_MULTI_PCI_MSI),
+	.chip = &nwl_msi_irq_chip,
+};
+#endif
+
+static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+	phys_addr_t msi_addr = pcie->phys_pcie_reg_base;
+
+	msg->address_lo = lower_32_bits(msi_addr);
+	msg->address_hi = upper_32_bits(msi_addr);
+	msg->data = data->hwirq;
+}
+
+static int nwl_msi_set_affinity(struct irq_data *irq_data,
+				const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip nwl_irq_chip = {
+	.name = "Xilinx MSI",
+	.irq_compose_msi_msg = nwl_compose_msi_msg,
+	.irq_set_affinity = nwl_msi_set_affinity,
+};
+
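+/*
+ * Multi-MSI blocks must be a power of two in size and naturally aligned,
+ * because the endpoint encodes the vector index in the low bits of the
+ * MSI data; bitmap_find_free_region() with get_count_order(nr_irqs)
+ * provides exactly that guarantee.
+ */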
+static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs, void *args)
+{
+	struct nwl_pcie *pcie = domain->host_data;
+	struct nwl_msi *msi = &pcie->msi;
+	int bit;
+	int i;
+
+	mutex_lock(&msi->lock);
+	bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+				      get_count_order(nr_irqs));
+	if (bit < 0) {
+		mutex_unlock(&msi->lock);
+		return -ENOSPC;
+	}
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
+				domain->host_data, handle_simple_irq,
+				NULL, NULL);
+	}
+	mutex_unlock(&msi->lock);
+	return 0;
+}
+
+static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+					unsigned int nr_irqs)
+{
+	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+	struct nwl_msi *msi = &pcie->msi;
+
+	mutex_lock(&msi->lock);
+	bitmap_release_region(msi->bitmap, data->hwirq,
+			      get_count_order(nr_irqs));
+	mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+	.alloc  = nwl_irq_domain_alloc,
+	.free   = nwl_irq_domain_free,
+};
+
+static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
+{
+#ifdef CONFIG_PCI_MSI
+	struct device *dev = pcie->dev;
+	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+	struct nwl_msi *msi = &pcie->msi;
+
+	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
+						&dev_msi_domain_ops, pcie);
+	if (!msi->dev_domain) {
+		dev_err(dev, "failed to create dev IRQ domain\n");
+		return -ENOMEM;
+	}
+	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+						    &nwl_msi_domain_info,
+						    msi->dev_domain);
+	if (!msi->msi_domain) {
+		dev_err(dev, "failed to create msi IRQ domain\n");
+		irq_domain_remove(msi->dev_domain);
+		return -ENOMEM;
+	}
+#endif
+	return 0;
+}
+
+static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *legacy_intc_node;
+
+	legacy_intc_node = of_get_next_child(node, NULL);
+	if (!legacy_intc_node) {
+		dev_err(dev, "No legacy intc node found\n");
+		return -EINVAL;
+	}
+
+	pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
+							PCI_NUM_INTX,
+							&legacy_domain_ops,
+							pcie);
+	of_node_put(legacy_intc_node);
+	if (!pcie->legacy_irq_domain) {
+		dev_err(dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	raw_spin_lock_init(&pcie->leg_mask_lock);
+	nwl_pcie_init_msi_irq_domain(pcie);
+	return 0;
+}
+
+static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct nwl_msi *msi = &pcie->msi;
+	unsigned long base;
+	int ret;
+	int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);
+
+	mutex_init(&msi->lock);
+
+	msi->bitmap = kzalloc(size, GFP_KERNEL);
+	if (!msi->bitmap)
+		return -ENOMEM;
+
+	/* Get msi_1 IRQ number */
+	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+	if (msi->irq_msi1 < 0) {
+		dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	irq_set_chained_handler_and_data(msi->irq_msi1,
+					 nwl_pcie_msi_handler_high, pcie);
+
+	/* Get msi_0 IRQ number */
+	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+	if (msi->irq_msi0 < 0) {
+		dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	irq_set_chained_handler_and_data(msi->irq_msi0,
+					 nwl_pcie_msi_handler_low, pcie);
+
+	/* Check for msii_present bit */
+	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
+	if (!ret) {
+		dev_err(dev, "MSI not present\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	/* Enable MSII */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
+			  MSII_ENABLE, I_MSII_CONTROL);
+
+	/* Enable MSII status */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
+			  MSII_STATUS_ENABLE, I_MSII_CONTROL);
+
+	/* Set the MSI target address to the PCIe controller register base */
+	base = pcie->phys_pcie_reg_base;
+	nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
+	nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);
+
+	/*
+	 * For high range MSI interrupts: disable, clear any pending,
+	 * and enable
+	 */
+	nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI);
+
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie,  MSGF_MSI_STATUS_HI) &
+			  MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);
+
+	nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);
+
+	/*
+	 * For low range MSI interrupts: disable, clear any pending,
+	 * and enable
+	 */
+	nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO);
+
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
+			  MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);
+
+	nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
+
+	return 0;
+err:
+	kfree(msi->bitmap);
+	msi->bitmap = NULL;
+	return ret;
+}
+
+static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	u32 breg_val, ecam_val, first_busno = 0;
+	int err;
+
+	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
+	if (!breg_val) {
+		dev_err(dev, "BREG is not present\n");
+		return -ENODEV;
+	}
+
+	/* Write bridge_off to breg base */
+	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
+			  E_BREG_BASE_LO);
+	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
+			  E_BREG_BASE_HI);
+
+	/* Enable BREG */
+	nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
+			  E_BREG_CONTROL);
+
+	/* Disable DMA channel registers */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
+			  CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);
+
+	/* Enable Ingress subtractive decode translation */
+	nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);
+
+	/* Enable msg filtering details */
+	nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
+			  BRCFG_PCIE_RX_MSG_FILTER);
+
+	err = nwl_wait_for_link(pcie);
+	if (err)
+		return err;
+
+	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
+	if (!ecam_val) {
+		dev_err(dev, "ECAM is not present\n");
+		return -ENODEV;
+	}
+
+	/* Enable ECAM */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+			  E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
+
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+			  (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
+			  E_ECAM_CONTROL);
+
+	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
+			  E_ECAM_BASE_LO);
+	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
+			  E_ECAM_BASE_HI);
+
+	/* Get bus range */
+	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+	pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
+	/*
+	 * Write primary, secondary and subordinate bus numbers in a single
+	 * 32-bit access: primary in bits [7:0], secondary in [15:8] and
+	 * subordinate in [23:16] (E_ECAM_SIZE_SHIFT coincides with the
+	 * subordinate byte position, hence its reuse below).
+	 */
+	ecam_val = first_busno;
+	ecam_val |= (first_busno + 1) << 8;
+	ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
+	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
+
+	if (nwl_pcie_link_up(pcie))
+		dev_info(dev, "Link is UP\n");
+	else
+		dev_info(dev, "Link is DOWN\n");
+
+	/* Get misc IRQ number */
+	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
+	if (pcie->irq_misc < 0) {
+		dev_err(dev, "failed to get misc IRQ %d\n",
+			pcie->irq_misc);
+		return -EINVAL;
+	}
+
+	err = devm_request_irq(dev, pcie->irq_misc,
+			       nwl_pcie_misc_handler, IRQF_SHARED,
+			       "nwl_pcie:misc", pcie);
+	if (err) {
+		dev_err(dev, "fail to register misc IRQ#%d\n",
+			pcie->irq_misc);
+		return err;
+	}
+
+	/* Disable all misc interrupts */
+	nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
+
+	/* Clear pending misc interrupts */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
+			  MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);
+
+	/* Enable all misc interrupts */
+	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
+
+
+	/* Disable all legacy interrupts */
+	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
+
+	/* Clear pending legacy interrupts */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
+			  MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
+
+	/* Enable all legacy interrupts */
+	nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
+
+	/* Enable the bridge config interrupt */
+	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
+			  BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);
+
+	return 0;
+}
+
+static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
+			     struct platform_device *pdev)
+{
+	struct device *dev = pcie->dev;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+	pcie->breg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->breg_base))
+		return PTR_ERR(pcie->breg_base);
+	pcie->phys_breg_base = res->start;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
+	pcie->pcireg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->pcireg_base))
+		return PTR_ERR(pcie->pcireg_base);
+	pcie->phys_pcie_reg_base = res->start;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+	pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
+	if (IS_ERR(pcie->ecam_base))
+		return PTR_ERR(pcie->ecam_base);
+	pcie->phys_ecam_base = res->start;
+
+	/* Get intx IRQ number */
+	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
+	if (pcie->irq_intx < 0) {
+		dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
+		return pcie->irq_intx;
+	}
+
+	irq_set_chained_handler_and_data(pcie->irq_intx,
+					 nwl_pcie_leg_handler, pcie);
+
+	return 0;
+}
+
+static const struct of_device_id nwl_pcie_of_match[] = {
+	{ .compatible = "xlnx,nwl-pcie-2.11", },
+	{}
+};
+
+static int nwl_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct nwl_pcie *pcie;
+	struct pci_bus *bus;
+	struct pci_bus *child;
+	struct pci_host_bridge *bridge;
+	int err;
+	resource_size_t iobase = 0;
+	LIST_HEAD(res);
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+	if (!bridge)
+		return -ENODEV;
+
+	pcie = pci_host_bridge_priv(bridge);
+
+	pcie->dev = dev;
+	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
+
+	err = nwl_pcie_parse_dt(pcie, pdev);
+	if (err) {
+		dev_err(dev, "Parsing DT failed\n");
+		return err;
+	}
+
+	pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	err = clk_prepare_enable(pcie->clk);
+	if (err) {
+		dev_err(dev, "can't enable PCIe ref clock\n");
+		return err;
+	}
+
+	err = nwl_pcie_bridge_init(pcie);
+	if (err) {
+		dev_err(dev, "HW Initialization failed\n");
+		return err;
+	}
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &iobase);
+	if (err) {
+		dev_err(dev, "Getting bridge resources failed\n");
+		return err;
+	}
+
+	err = devm_request_pci_bus_resources(dev, &res);
+	if (err)
+		goto error;
+
+	err = nwl_pcie_init_irq_domain(pcie);
+	if (err) {
+		dev_err(dev, "Failed creating IRQ Domain\n");
+		goto error;
+	}
+
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = pcie;
+	bridge->busnr = pcie->root_busno;
+	bridge->ops = &nwl_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		err = nwl_pcie_enable_msi(pcie);
+		if (err < 0) {
+			dev_err(dev, "failed to enable MSI support: %d\n", err);
+			goto error;
+		}
+	}
+
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err)
+		goto error;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bus);
+	return 0;
+
+error:
+	pci_free_resource_list(&res);
+	return err;
+}
+
+static struct platform_driver nwl_pcie_driver = {
+	.driver = {
+		.name = "nwl-pcie",
+		.suppress_bind_attrs = true,
+		.of_match_table = nwl_pcie_of_match,
+	},
+	.probe = nwl_pcie_probe,
+};
+builtin_platform_driver(nwl_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/pcie-xilinx.c b/marvell/linux/drivers/pci/controller/pcie-xilinx.c
new file mode 100644
index 0000000..5bf3af3
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/pcie-xilinx.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Xilinx AXI PCIe Bridge
+ *
+ * Copyright (c) 2012 - 2014 Xilinx, Inc.
+ *
+ * Based on the Tegra PCIe driver
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "../pci.h"
+
+/* Register definitions */
+#define XILINX_PCIE_REG_BIR		0x00000130
+#define XILINX_PCIE_REG_IDR		0x00000138
+#define XILINX_PCIE_REG_IMR		0x0000013c
+#define XILINX_PCIE_REG_PSCR		0x00000144
+#define XILINX_PCIE_REG_RPSC		0x00000148
+#define XILINX_PCIE_REG_MSIBASE1	0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2	0x00000150
+#define XILINX_PCIE_REG_RPEFR		0x00000154
+#define XILINX_PCIE_REG_RPIFR1		0x00000158
+#define XILINX_PCIE_REG_RPIFR2		0x0000015c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN	BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR	BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR	BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET	BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT	BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE	BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL	BIT(10)
+#define XILINX_PCIE_INTR_FATAL		BIT(11)
+#define XILINX_PCIE_INTR_INTX		BIT(16)
+#define XILINX_PCIE_INTR_MSI		BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP	BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP	BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL	BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP	BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT	BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR	BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR	BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR	BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP	BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK	0x1FF30FED
+#define XILINX_PCIE_IMR_ENABLE_MASK	0x1FF30F0D
+#define XILINX_PCIE_IDR_ALL_MASK	0xFFFFFFFF
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID	BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID	BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR	BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK	GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK	0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT	27
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK	GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT	16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA	GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT		20
+#define ECAM_DEV_NUM_SHIFT		12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS		128
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @msi_pages: MSI pages
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @msi_domain: MSI IRQ domain pointer
+ * @leg_domain: Legacy IRQ domain pointer
+ * @resources: Bus Resources
+ */
+struct xilinx_pcie_port {
+	void __iomem *reg_base;
+	u32 irq;
+	unsigned long msi_pages;
+	u8 root_busno;
+	struct device *dev;
+	struct irq_domain *msi_domain;
+	struct irq_domain *leg_domain;
+	struct list_head resources;
+};
+
+static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+	return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+	writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
+{
+	return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+		XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+		dev_dbg(dev, "Requester ID %lu\n",
+			val & XILINX_PCIE_RPEFR_REQ_ID);
+		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+			   XILINX_PCIE_REG_RPEFR);
+	}
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' on success and 'false' if invalid device is found
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct xilinx_pcie_port *port = bus->sysdata;
+
+	/* Check if link is up when trying to access downstream ports */
+	if (bus->number != port->root_busno)
+		if (!xilinx_pcie_link_up(port))
+			return false;
+
+	/* Only one device down on each root port */
+	if (bus->number == port->root_busno && devfn > 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * xilinx_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space to be accessed.
+ */
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+					 unsigned int devfn, int where)
+{
+	struct xilinx_pcie_port *port = bus->sysdata;
+	int relbus;
+
+	if (!xilinx_pcie_valid_device(bus, devfn))
+		return NULL;
+
+	relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+		 (devfn << ECAM_DEV_NUM_SHIFT);
+
+	return port->reg_base + relbus + where;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+	.map_bus = xilinx_pcie_map_bus,
+	.read	= pci_generic_config_read,
+	.write	= pci_generic_config_write,
+};
+
+/* MSI functions */
+
+/**
+ * xilinx_pcie_destroy_msi - Free MSI number
+ * @irq: IRQ to be freed
+ */
+static void xilinx_pcie_destroy_msi(unsigned int irq)
+{
+	struct msi_desc *msi;
+	struct xilinx_pcie_port *port;
+	struct irq_data *d = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+	if (!test_bit(hwirq, msi_irq_in_use)) {
+		msi = irq_get_msi_desc(irq);
+		port = msi_desc_to_pci_sysdata(msi);
+		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
+	} else {
+		clear_bit(hwirq, msi_irq_in_use);
+	}
+}
+
+/**
+ * xilinx_pcie_assign_msi - Allocate MSI number
+ *
+ * Return: A valid IRQ on success and error value on failure.
+ */
+static int xilinx_pcie_assign_msi(void)
+{
+	int pos;
+
+	pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+	if (pos < XILINX_NUM_MSI_IRQS)
+		set_bit(pos, msi_irq_in_use);
+	else
+		return -ENOSPC;
+
+	return pos;
+}
+
+/**
+ * xilinx_msi_teardown_irq - Destroy the MSI
+ * @chip: MSI Chip descriptor
+ * @irq: MSI IRQ to destroy
+ */
+static void xilinx_msi_teardown_irq(struct msi_controller *chip,
+				    unsigned int irq)
+{
+	xilinx_pcie_destroy_msi(irq);
+	irq_dispose_mapping(irq);
+}
+
+/**
+ * xilinx_pcie_msi_setup_irq - Setup MSI request
+ * @chip: MSI chip pointer
+ * @pdev: PCIe device pointer
+ * @desc: MSI descriptor pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
+				     struct pci_dev *pdev,
+				     struct msi_desc *desc)
+{
+	struct xilinx_pcie_port *port = pdev->bus->sysdata;
+	unsigned int irq;
+	int hwirq;
+	struct msi_msg msg;
+	phys_addr_t msg_addr;
+
+	hwirq = xilinx_pcie_assign_msi();
+	if (hwirq < 0)
+		return hwirq;
+
+	irq = irq_create_mapping(port->msi_domain, hwirq);
+	if (!irq)
+		return -EINVAL;
+
+	irq_set_msi_desc(irq, desc);
+
+	msg_addr = virt_to_phys((void *)port->msi_pages);
+
+	msg.address_hi = 0;
+	msg.address_lo = msg_addr;
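+	/*
+	 * Note: the Linux virq, not the hwirq, is used as the MSI data;
+	 * the interrupt handler feeds the RPIFR2 message data straight to
+	 * generic_handle_irq() without an irq_find_mapping() lookup, so
+	 * the two must agree.
+	 */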
+	msg.data = irq;
+
+	pci_write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+/* MSI Chip Descriptor */
+static struct msi_controller xilinx_pcie_msi_chip = {
+	.setup_irq = xilinx_pcie_msi_setup_irq,
+	.teardown_irq = xilinx_msi_teardown_irq,
+};
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip xilinx_msi_irq_chip = {
+	.name = "Xilinx PCIe MSI",
+	.irq_enable = pci_msi_unmask_irq,
+	.irq_disable = pci_msi_mask_irq,
+	.irq_mask = pci_msi_mask_irq,
+	.irq_unmask = pci_msi_unmask_irq,
+};
+
+/**
+ * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			       irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+/* IRQ Domain operations */
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = xilinx_pcie_msi_map,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+	phys_addr_t msg_addr;
+
+	port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+	if (!port->msi_pages)
+		return -ENOMEM;
+
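+	/*
+	 * The page only reserves a unique physical address for the MSI
+	 * doorbell: the bridge matches inbound writes against MSIBASE1/2
+	 * and raises an interrupt, so the memory itself is presumably
+	 * never touched by hardware.
+	 */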
+	msg_addr = virt_to_phys((void *)port->msi_pages);
+	pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
+	pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+
+	return 0;
+}
+
+/* INTx Functions */
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+
+	return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = xilinx_pcie_intx_map,
+	.xlate = pci_irqd_intx_xlate,
+};
+
+/* PCIe HW Functions */
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+	struct device *dev = port->dev;
+	u32 val, mask, status;
+
+	/* Read interrupt decode and mask registers */
+	val = pcie_read(port, XILINX_PCIE_REG_IDR);
+	mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+	status = val & mask;
+	if (!status)
+		return IRQ_NONE;
+
+	if (status & XILINX_PCIE_INTR_LINK_DOWN)
+		dev_warn(dev, "Link Down\n");
+
+	if (status & XILINX_PCIE_INTR_ECRC_ERR)
+		dev_warn(dev, "ECRC failed\n");
+
+	if (status & XILINX_PCIE_INTR_STR_ERR)
+		dev_warn(dev, "Streaming error\n");
+
+	if (status & XILINX_PCIE_INTR_HOT_RESET)
+		dev_info(dev, "Hot reset\n");
+
+	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+		dev_warn(dev, "ECAM access timeout\n");
+
+	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+		dev_warn(dev, "Correctable error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_NONFATAL) {
+		dev_warn(dev, "Non fatal error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_FATAL) {
+		dev_warn(dev, "Fatal error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
+		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+		/* Check whether the interrupt is valid */
+		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+			dev_warn(dev, "RP Intr FIFO1 read error\n");
+			goto error;
+		}
+
+		/* Decode the IRQ number */
+		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+			val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+				XILINX_PCIE_RPIFR2_MSG_DATA;
+		} else {
+			val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+				XILINX_PCIE_RPIFR1_INTR_SHIFT;
+			val = irq_find_mapping(port->leg_domain, val);
+		}
+
+		/* Clear interrupt FIFO register 1 */
+		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+			   XILINX_PCIE_REG_RPIFR1);
+
+		/* Handle the interrupt */
+		if (IS_ENABLED(CONFIG_PCI_MSI) ||
+		    !(val & XILINX_PCIE_RPIFR1_MSI_INTR))
+			generic_handle_irq(val);
+	}
+
+	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+		dev_warn(dev, "Slave unsupported request\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+		dev_warn(dev, "Slave unexpected completion\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_COMPL)
+		dev_warn(dev, "Slave completion timeout\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_ERRP)
+		dev_warn(dev, "Slave Error Poison\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+		dev_warn(dev, "Slave Completer Abort\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+		dev_warn(dev, "Slave Illegal Burst\n");
+
+	if (status & XILINX_PCIE_INTR_MST_DECERR)
+		dev_warn(dev, "Master decode error\n");
+
+	if (status & XILINX_PCIE_INTR_MST_SLVERR)
+		dev_warn(dev, "Master slave error\n");
+
+	if (status & XILINX_PCIE_INTR_MST_ERRP)
+		dev_warn(dev, "Master error poison\n");
+
+error:
+	/* Clear the Interrupt Decode register */
+	pcie_write(port, status, XILINX_PCIE_REG_IDR);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node;
+	int ret;
+
+	/* Setup INTx */
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+						 &intx_domain_ops,
+						 port);
+	of_node_put(pcie_intc_node);
+	if (!port->leg_domain) {
+		dev_err(dev, "Failed to get a INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	/* Setup MSI */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		port->msi_domain = irq_domain_add_linear(node,
+							 XILINX_NUM_MSI_IRQS,
+							 &msi_domain_ops,
+							 &xilinx_pcie_msi_chip);
+		if (!port->msi_domain) {
+			dev_err(dev, "Failed to get a MSI IRQ domain\n");
+			return -ENODEV;
+		}
+
+		ret = xilinx_pcie_enable_msi(port);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+
+	if (xilinx_pcie_link_up(port))
+		dev_info(dev, "PCIe Link is UP\n");
+	else
+		dev_info(dev, "PCIe Link is DOWN\n");
+
+	/* Disable all interrupts */
+	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+		   XILINX_PCIE_REG_IMR);
+
+	/* Clear pending interrupts */
+	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+			 XILINX_PCIE_IMR_ALL_MASK,
+		   XILINX_PCIE_REG_IDR);
+
+	/* Enable all interrupts we handle */
+	pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
+
+	/* Enable the Bridge enable bit */
+	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+			 XILINX_PCIE_REG_RPSC_BEN,
+		   XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct resource regs;
+	int err;
+
+	err = of_address_to_resource(node, 0, &regs);
+	if (err) {
+		dev_err(dev, "missing \"reg\" property\n");
+		return err;
+	}
+
+	port->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
+	if (IS_ERR(port->reg_base))
+		return PTR_ERR(port->reg_base);
+
+	port->irq = irq_of_parse_and_map(node, 0);
+	err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+			       IRQF_SHARED | IRQF_NO_THREAD,
+			       "xilinx-pcie", port);
+	if (err) {
+		dev_err(dev, "unable to request irq %d\n", port->irq);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct xilinx_pcie_port *port;
+	struct pci_bus *bus, *child;
+	struct pci_host_bridge *bridge;
+	int err;
+	resource_size_t iobase = 0;
+	LIST_HEAD(res);
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+	if (!bridge)
+		return -ENODEV;
+
+	port = pci_host_bridge_priv(bridge);
+
+	port->dev = dev;
+
+	err = xilinx_pcie_parse_dt(port);
+	if (err) {
+		dev_err(dev, "Parsing DT failed\n");
+		return err;
+	}
+
+	xilinx_pcie_init_port(port);
+
+	err = xilinx_pcie_init_irq_domain(port);
+	if (err) {
+		dev_err(dev, "Failed creating IRQ Domain\n");
+		return err;
+	}
+
+	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+						    &iobase);
+	if (err) {
+		dev_err(dev, "Getting bridge resources failed\n");
+		return err;
+	}
+
+	err = devm_request_pci_bus_resources(dev, &res);
+	if (err)
+		goto error;
+
+
+	list_splice_init(&res, &bridge->windows);
+	bridge->dev.parent = dev;
+	bridge->sysdata = port;
+	bridge->busnr = 0;
+	bridge->ops = &xilinx_pcie_ops;
+	bridge->map_irq = of_irq_parse_and_map_pci;
+	bridge->swizzle_irq = pci_common_swizzle;
+
+#ifdef CONFIG_PCI_MSI
+	xilinx_pcie_msi_chip.dev = dev;
+	bridge->msi = &xilinx_pcie_msi_chip;
+#endif
+	err = pci_scan_root_bus_bridge(bridge);
+	if (err < 0)
+		goto error;
+
+	bus = bridge->bus;
+
+	pci_assign_unassigned_bus_resources(bus);
+	list_for_each_entry(child, &bus->children, node)
+		pcie_bus_configure_settings(child);
+	pci_bus_add_devices(bus);
+	return 0;
+
+error:
+	pci_free_resource_list(&res);
+	return err;
+}
+
+static const struct of_device_id xilinx_pcie_of_match[] = {
+	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
+	{}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+	.driver = {
+		.name = "xilinx-pcie",
+		.of_match_table = xilinx_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = xilinx_pcie_probe,
+};
+builtin_platform_driver(xilinx_pcie_driver);
diff --git a/marvell/linux/drivers/pci/controller/vmd.c b/marvell/linux/drivers/pci/controller/vmd.c
new file mode 100644
index 0000000..9966dcf
--- /dev/null
+++ b/marvell/linux/drivers/pci/controller/vmd.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Volume Management Device driver
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/srcu.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+#include <asm/irqdomain.h>
+#include <asm/device.h>
+#include <asm/msi.h>
+#include <asm/msidef.h>
+
+#define VMD_CFGBAR	0
+#define VMD_MEMBAR1	2
+#define VMD_MEMBAR2	4
+
+#define PCI_REG_VMCAP		0x40
+#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
+#define PCI_REG_VMCONFIG	0x44
+#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
+#define PCI_REG_VMLOCK		0x70
+#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)
+
+#define MB2_SHADOW_OFFSET	0x2000
+#define MB2_SHADOW_SIZE		16
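+/*
+ * The shadow region in MEMBAR2 is assumed to hold two u64 host-physical
+ * base addresses, one per membar (see VMD_FEAT_HAS_MEMBAR_SHADOW).
+ */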
+
+enum vmd_features {
+	/*
+	 * Device may contain registers which hint the physical location of the
+	 * membars, in order to allow proper address translation during
+	 * resource assignment to enable guest virtualization
+	 */
+	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),
+
+	/*
+	 * Device may provide root port configuration information which limits
+	 * bus numbering
+	 */
+	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
+};
+
+/*
+ * Lock for manipulating VMD IRQ lists.
+ */
+static DEFINE_RAW_SPINLOCK(list_lock);
+
+/**
+ * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
+ * @node:	list item for parent traversal.
+ * @irq:	back pointer to parent.
+ * @enabled:	true if driver enabled IRQ
+ * @virq:	the virtual IRQ value provided to the requesting driver.
+ *
+ * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
+ * a VMD IRQ using this structure.
+ */
+struct vmd_irq {
+	struct list_head	node;
+	struct vmd_irq_list	*irq;
+	bool			enabled;
+	unsigned int		virq;
+};
+
+/**
+ * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
+ * @irq_list:	the list of IRQs the VMD vector demuxes to.
+ * @srcu:	SRCU struct for local synchronization.
+ * @count:	number of child IRQs assigned to this vector; used to track
+ *		sharing.
+ */
+struct vmd_irq_list {
+	struct list_head	irq_list;
+	struct srcu_struct	srcu;
+	unsigned int		count;
+};
+
+struct vmd_dev {
+	struct pci_dev		*dev;
+
+	spinlock_t		cfg_lock;
+	char __iomem		*cfgbar;
+
+	int msix_count;
+	struct vmd_irq_list	*irqs;
+
+	struct pci_sysdata	sysdata;
+	struct resource		resources[3];
+	struct irq_domain	*irq_domain;
+	struct pci_bus		*bus;
+	u8			busn_start;
+
+	struct dma_map_ops	dma_ops;
+	struct dma_domain	dma_domain;
+};
+
+static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
+{
+	return container_of(bus->sysdata, struct vmd_dev, sysdata);
+}
+
+static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
+					   struct vmd_irq_list *irqs)
+{
+	return irqs - vmd->irqs;
+}
+
+/*
+ * Drivers managing a device in a VMD domain allocate their own IRQs as before,
+ * but the MSI entry for the hardware it's driving will be programmed with a
+ * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
+ * domain into one of its own, and the VMD driver de-muxes these for the
+ * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
+ * and irq_chip to set this up.
+ */
+static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct vmd_irq *vmdirq = data->chip_data;
+	struct vmd_irq_list *irq = vmdirq->irq;
+	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);
+
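+	/* Encode the chosen VMD vector index as the MSI destination ID */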
+	msg->address_hi = MSI_ADDR_BASE_HI;
+	msg->address_lo = MSI_ADDR_BASE_LO |
+			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
+	msg->data = 0;
+}
+
+/*
+ * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
+ */
+static void vmd_irq_enable(struct irq_data *data)
+{
+	struct vmd_irq *vmdirq = data->chip_data;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&list_lock, flags);
+	WARN_ON(vmdirq->enabled);
+	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+	vmdirq->enabled = true;
+	raw_spin_unlock_irqrestore(&list_lock, flags);
+
+	data->chip->irq_unmask(data);
+}
+
+static void vmd_irq_disable(struct irq_data *data)
+{
+	struct vmd_irq *vmdirq = data->chip_data;
+	unsigned long flags;
+
+	data->chip->irq_mask(data);
+
+	raw_spin_lock_irqsave(&list_lock, flags);
+	if (vmdirq->enabled) {
+		list_del_rcu(&vmdirq->node);
+		vmdirq->enabled = false;
+	}
+	raw_spin_unlock_irqrestore(&list_lock, flags);
+}
+
+/*
+ * XXX: Stubbed until we develop an acceptable way to avoid conflicts with
+ * other devices sharing the same vector.
+ */
+static int vmd_irq_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip vmd_msi_controller = {
+	.name			= "VMD-MSI",
+	.irq_enable		= vmd_irq_enable,
+	.irq_disable		= vmd_irq_disable,
+	.irq_compose_msi_msg	= vmd_compose_msi_msg,
+	.irq_set_affinity	= vmd_irq_set_affinity,
+};
+
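+/*
+ * The hwirq returned here is a placeholder; vmd_msi_init() later installs
+ * the real parent MSI-X vector as the hwirq via irq_domain_set_info().
+ */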
+static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
+				     msi_alloc_info_t *arg)
+{
+	return 0;
+}
+
+/*
+ * XXX: We can be even smarter selecting the best IRQ once we solve the
+ * affinity problem.
+ */
+static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
+{
+	int i, best = 1;
+	unsigned long flags;
+
+	if (vmd->msix_count == 1)
+		return &vmd->irqs[0];
+
+	/*
+	 * Allowlist for fast-interrupt handlers. All others will share the
+	 * "slow" interrupt vector.
+	 */
+	switch (msi_desc_to_pci_dev(desc)->class) {
+	case PCI_CLASS_STORAGE_EXPRESS:
+		break;
+	default:
+		return &vmd->irqs[0];
+	}
+
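+	/*
+	 * Search from vector 1: vector 0 is reserved as the shared "slow"
+	 * vector, so fast handlers are spread across the remaining vectors
+	 * by picking the least-subscribed one.
+	 */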
+	raw_spin_lock_irqsave(&list_lock, flags);
+	for (i = 1; i < vmd->msix_count; i++)
+		if (vmd->irqs[i].count < vmd->irqs[best].count)
+			best = i;
+	vmd->irqs[best].count++;
+	raw_spin_unlock_irqrestore(&list_lock, flags);
+
+	return &vmd->irqs[best];
+}
+
+static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+			unsigned int virq, irq_hw_number_t hwirq,
+			msi_alloc_info_t *arg)
+{
+	struct msi_desc *desc = arg->desc;
+	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
+	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+	unsigned int index, vector;
+
+	if (!vmdirq)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vmdirq->node);
+	vmdirq->irq = vmd_next_irq(vmd, desc);
+	vmdirq->virq = virq;
+	index = index_from_irqs(vmd, vmdirq->irq);
+	vector = pci_irq_vector(vmd->dev, index);
+
+	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+			    handle_untracked_irq, vmd, NULL);
+	return 0;
+}
+
+static void vmd_msi_free(struct irq_domain *domain,
+			struct msi_domain_info *info, unsigned int virq)
+{
+	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
+	unsigned long flags;
+
+	synchronize_srcu(&vmdirq->irq->srcu);
+
+	/* XXX: Potential optimization to rebalance */
+	raw_spin_lock_irqsave(&list_lock, flags);
+	vmdirq->irq->count--;
+	raw_spin_unlock_irqrestore(&list_lock, flags);
+
+	kfree(vmdirq);
+}
+
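+/*
+ * Clamp the number of vectors a child device may request to the number of
+ * MSI-X vectors the VMD itself owns; a positive return value tells the
+ * caller to retry with that smaller count.
+ */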
+static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
+			   int nvec, msi_alloc_info_t *arg)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
+	if (nvec > vmd->msix_count)
+		return vmd->msix_count;
+
+	memset(arg, 0, sizeof(*arg));
+	return 0;
+}
+
+static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	arg->desc = desc;
+}
+
+static struct msi_domain_ops vmd_msi_domain_ops = {
+	.get_hwirq	= vmd_get_hwirq,
+	.msi_init	= vmd_msi_init,
+	.msi_free	= vmd_msi_free,
+	.msi_prepare	= vmd_msi_prepare,
+	.set_desc	= vmd_set_desc,
+};
+
+static struct msi_domain_info vmd_msi_domain_info = {
+	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+			  MSI_FLAG_PCI_MSIX,
+	.ops		= &vmd_msi_domain_ops,
+	.chip		= &vmd_msi_controller,
+};
+
+/*
+ * VMD replaces the requester ID with its own.  DMA mappings for devices in a
+ * VMD domain need to be mapped for the VMD, not the device requiring
+ * the mapping.
+ */
+static struct device *to_vmd_dev(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
+	return &vmd->dev->dev;
+}
+
+static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
+		       gfp_t flag, unsigned long attrs)
+{
+	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
+}
+
+static void vmd_free(struct device *dev, size_t size, void *vaddr,
+		     dma_addr_t addr, unsigned long attrs)
+{
+	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
+}
+
+static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t addr, size_t size,
+		    unsigned long attrs)
+{
+	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
+			attrs);
+}
+
+static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
+			   void *cpu_addr, dma_addr_t addr, size_t size,
+			   unsigned long attrs)
+{
+	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
+			attrs);
+}
+
+static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction dir,
+			       unsigned long attrs)
+{
+	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
+			attrs);
+}
+
+static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
+			   enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
+}
+
+static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction dir, unsigned long attrs)
+{
+	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
+}
+
+static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			 enum dma_data_direction dir, unsigned long attrs)
+{
+	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
+}
+
+static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+				    size_t size, enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+}
+
+static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
+				       size_t size, enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
+}
+
+static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction dir)
+{
+	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+}
+
+static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir)
+{
+	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
+}
+
+static int vmd_dma_supported(struct device *dev, u64 mask)
+{
+	return dma_supported(to_vmd_dev(dev), mask);
+}
+
+static u64 vmd_get_required_mask(struct device *dev)
+{
+	return dma_get_required_mask(to_vmd_dev(dev));
+}
+
+static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
+{
+	struct dma_domain *domain = &vmd->dma_domain;
+
+	if (get_dma_ops(&vmd->dev->dev))
+		del_dma_domain(domain);
+}
+
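+/*
+ * Install a vmd_* trampoline for a DMA operation only when the parent
+ * device's dma_map_ops actually implements it, so the proxied ops table
+ * mirrors the parent's capabilities.
+ */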
+#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
+	do {					\
+		if (source->fn)			\
+			dest->fn = vmd_##fn;	\
+	} while (0)
+
+static void vmd_setup_dma_ops(struct vmd_dev *vmd)
+{
+	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
+	struct dma_map_ops *dest = &vmd->dma_ops;
+	struct dma_domain *domain = &vmd->dma_domain;
+
+	domain->domain_nr = vmd->sysdata.domain;
+	domain->dma_ops = dest;
+
+	if (!source)
+		return;
+	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
+	ASSIGN_VMD_DMA_OPS(source, dest, free);
+	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
+	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
+	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
+	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
+	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
+	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
+	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
+	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
+	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
+	add_dma_domain(domain);
+}
+#undef ASSIGN_VMD_DMA_OPS
+
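+/*
+ * Config space inside CFGBAR uses the ECAM layout: 1MB (bus << 20) per
+ * bus and 4KB (devfn << 12) per device/function, plus the register offset.
+ */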
+static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
+				  unsigned int devfn, int reg, int len)
+{
+	char __iomem *addr = vmd->cfgbar +
+			     ((bus->number - vmd->busn_start) << 20) +
+			     (devfn << 12) + reg;
+
+	if ((addr - vmd->cfgbar) + len >=
+	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
+		return NULL;
+
+	return addr;
+}
+
+/*
+ * CPU may deadlock if config space is not serialized on some versions of this
+ * hardware, so all config space access is done under a spinlock.
+ */
+static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
+			int len, u32 *value)
+{
+	struct vmd_dev *vmd = vmd_from_bus(bus);
+	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!addr)
+		return -EFAULT;
+
+	spin_lock_irqsave(&vmd->cfg_lock, flags);
+	switch (len) {
+	case 1:
+		*value = readb(addr);
+		break;
+	case 2:
+		*value = readw(addr);
+		break;
+	case 4:
+		*value = readl(addr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+	return ret;
+}
+
+/*
+ * VMD h/w converts non-posted config writes to posted memory writes. The
+ * read-back in this function forces the completion so it returns only
+ * after the config space was written, as callers expect.
+ */
+static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
+			 int len, u32 value)
+{
+	struct vmd_dev *vmd = vmd_from_bus(bus);
+	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+	unsigned long flags;
+	int ret = 0;
+
+	if (!addr)
+		return -EFAULT;
+
+	spin_lock_irqsave(&vmd->cfg_lock, flags);
+	switch (len) {
+	case 1:
+		writeb(value, addr);
+		readb(addr);
+		break;
+	case 2:
+		writew(value, addr);
+		readw(addr);
+		break;
+	case 4:
+		writel(value, addr);
+		readl(addr);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+	return ret;
+}
+
+static struct pci_ops vmd_ops = {
+	.read		= vmd_pci_read,
+	.write		= vmd_pci_write,
+};
+
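+/*
+ * The child domain's memory windows are carved out of the VMD's own
+ * MEMBARs, so link them as child resources while the domain is active.
+ */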
+static void vmd_attach_resources(struct vmd_dev *vmd)
+{
+	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
+	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
+}
+
+static void vmd_detach_resources(struct vmd_dev *vmd)
+{
+	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
+	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
+}
+
+/*
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
+ * 16 bits are the PCI Segment Group (domain) number.  Other bits are
+ * currently reserved.
+ */
+static int vmd_find_free_domain(void)
+{
+	int domain = 0xffff;
+	struct pci_bus *bus = NULL;
+
+	while ((bus = pci_find_next_bus(bus)) != NULL)
+		domain = max_t(int, domain, pci_domain_nr(bus));
+	return domain + 1;
+}
+
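+/*
+ * Bring up the synthetic PCI domain: apply feature-specific quirks, build
+ * the CFGBAR/MEMBAR resources, allocate a domain number and MSI irq_domain,
+ * then create, scan and populate the root bus.
+ */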
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+{
+	struct pci_sysdata *sd = &vmd->sysdata;
+	struct fwnode_handle *fn;
+	struct resource *res;
+	u32 upper_bits;
+	unsigned long flags;
+	LIST_HEAD(resources);
+	resource_size_t offset[2] = {0};
+	resource_size_t membar2_offset = 0x2000;
+	struct pci_bus *child;
+
+	/*
+	 * Shadow registers may exist in certain VMD device ids which allow
+	 * guests to correctly assign host physical addresses to the root ports
+	 * and child devices. These registers will either return the host value
+	 * or 0, depending on an enable bit in the VMD device.
+	 */
+	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+		u32 vmlock;
+		int ret;
+
+		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
+		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+		if (ret || vmlock == ~0)
+			return -ENODEV;
+
+		if (MB2_SHADOW_EN(vmlock)) {
+			void __iomem *membar2;
+
+			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
+			if (!membar2)
+				return -ENOMEM;
+			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
+					(readq(membar2 + MB2_SHADOW_OFFSET) &
+					 PCI_BASE_ADDRESS_MEM_MASK);
+			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
+					(readq(membar2 + MB2_SHADOW_OFFSET + 8) &
+					 PCI_BASE_ADDRESS_MEM_MASK);
+			pci_iounmap(vmd->dev, membar2);
+		}
+	}
+
+	/*
+	 * Certain VMD devices may have a root port configuration option which
+ * limits the bus range to 0-127 or 128-255.
+	 */
+	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
+		u32 vmcap, vmconfig;
+
+		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
+		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
+		if (BUS_RESTRICT_CAP(vmcap) &&
+		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
+			vmd->busn_start = 128;
+	}
+
+	res = &vmd->dev->resource[VMD_CFGBAR];
+	vmd->resources[0] = (struct resource) {
+		.name  = "VMD CFGBAR",
+		.start = vmd->busn_start,
+		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
+		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+	};
+
+	/*
+	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
+	 * put 32-bit resources in the window.
+	 *
+	 * There's no hardware reason why a 64-bit window *couldn't*
+	 * contain a 32-bit resource, but pbus_size_mem() computes the
+	 * bridge window size assuming a 64-bit window will contain no
+	 * 32-bit resources.  __pci_assign_resource() enforces that
+	 * artificial restriction to make sure everything will fit.
+	 *
+	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
+	 * if its address is <4GB so that we can convert it to a 32-bit
+	 * resource.  To be visible to the host OS, all VMD endpoints must
+	 * be initially configured by platform BIOS, which includes setting
+	 * up these resources.  We can assume the device is configured
+	 * according to the platform needs.
+	 */
+	res = &vmd->dev->resource[VMD_MEMBAR1];
+	upper_bits = upper_32_bits(res->end);
+	flags = res->flags & ~IORESOURCE_SIZEALIGN;
+	if (!upper_bits)
+		flags &= ~IORESOURCE_MEM_64;
+	vmd->resources[1] = (struct resource) {
+		.name  = "VMD MEMBAR1",
+		.start = res->start,
+		.end   = res->end,
+		.flags = flags,
+		.parent = res,
+	};
+
+	res = &vmd->dev->resource[VMD_MEMBAR2];
+	upper_bits = upper_32_bits(res->end);
+	flags = res->flags & ~IORESOURCE_SIZEALIGN;
+	if (!upper_bits)
+		flags &= ~IORESOURCE_MEM_64;
+	vmd->resources[2] = (struct resource) {
+		.name  = "VMD MEMBAR2",
+		.start = res->start + membar2_offset,
+		.end   = res->end,
+		.flags = flags,
+		.parent = res,
+	};
+
+	sd->vmd_domain = true;
+	sd->domain = vmd_find_free_domain();
+	if (sd->domain < 0)
+		return sd->domain;
+
+	sd->node = pcibus_to_node(vmd->dev->bus);
+
+	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+	if (!fn)
+		return -ENODEV;
+
+	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
+						    x86_vector_domain);
+	if (!vmd->irq_domain) {
+		irq_domain_free_fwnode(fn);
+		return -ENODEV;
+	}
+
+	pci_add_resource(&resources, &vmd->resources[0]);
+	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
+	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+
+	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
+				       &vmd_ops, sd, &resources);
+	if (!vmd->bus) {
+		pci_free_resource_list(&resources);
+		irq_domain_remove(vmd->irq_domain);
+		irq_domain_free_fwnode(fn);
+		return -ENODEV;
+	}
+
+	vmd_attach_resources(vmd);
+	vmd_setup_dma_ops(vmd);
+	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+
+	pci_scan_child_bus(vmd->bus);
+	pci_assign_unassigned_bus_resources(vmd->bus);
+
+	/*
+	 * VMD root buses are virtual and don't return true on pci_is_pcie(),
+	 * so they fail pcie_bus_configure_settings() early. Instead, run it
+	 * on each of the real root ports.
+	 */
+	list_for_each_entry(child, &vmd->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(vmd->bus);
+
+	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+			       "domain"), "Can't create symlink to domain\n");
+	return 0;
+}
+
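+/*
+ * Handler for one VMD MSI-X vector: demux by walking the registered child
+ * IRQs under SRCU and invoking each one's handler.
+ */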
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+	struct vmd_irq_list *irqs = data;
+	struct vmd_irq *vmdirq;
+	int idx;
+
+	idx = srcu_read_lock(&irqs->srcu);
+	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+		generic_handle_irq(vmdirq->virq);
+	srcu_read_unlock(&irqs->srcu, idx);
+
+	return IRQ_HANDLED;
+}
+
+static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	struct vmd_dev *vmd;
+	int i, err;
+
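+	/* CFGBAR must cover at least one bus worth of ECAM space (1MB) */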
+	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
+		return -ENOMEM;
+
+	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
+	if (!vmd)
+		return -ENOMEM;
+
+	vmd->dev = dev;
+	err = pcim_enable_device(dev);
+	if (err < 0)
+		return err;
+
+	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
+	if (!vmd->cfgbar)
+		return -ENOMEM;
+
+	pci_set_master(dev);
+	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
+		return -ENODEV;
+
+	vmd->msix_count = pci_msix_vec_count(dev);
+	if (vmd->msix_count < 0)
+		return -ENODEV;
+
+	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+					PCI_IRQ_MSIX);
+	if (vmd->msix_count < 0)
+		return vmd->msix_count;
+
+	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+				 GFP_KERNEL);
+	if (!vmd->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		err = init_srcu_struct(&vmd->irqs[i].srcu);
+		if (err)
+			return err;
+
+		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	spin_lock_init(&vmd->cfg_lock);
+	pci_set_drvdata(dev, vmd);
+	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
+	if (err)
+		return err;
+
+	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
+		 vmd->sysdata.domain);
+	return 0;
+}
+
+static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+{
+	int i;
+
+	for (i = 0; i < vmd->msix_count; i++)
+		cleanup_srcu_struct(&vmd->irqs[i].srcu);
+}
+
+static void vmd_remove(struct pci_dev *dev)
+{
+	struct vmd_dev *vmd = pci_get_drvdata(dev);
+	struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
+	pci_stop_root_bus(vmd->bus);
+	pci_remove_root_bus(vmd->bus);
+	vmd_cleanup_srcu(vmd);
+	vmd_teardown_dma_ops(vmd);
+	vmd_detach_resources(vmd);
+	irq_domain_remove(vmd->irq_domain);
+	irq_domain_free_fwnode(fn);
+}
+
+#ifdef CONFIG_PM_SLEEP
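+/*
+ * Release the vector handlers across suspend/resume: the devm-managed
+ * IRQs would otherwise stay requested while the device is powered down,
+ * so they are freed here and re-requested in vmd_resume().
+ */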
+static int vmd_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct vmd_dev *vmd = pci_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < vmd->msix_count; i++)
+		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+
+	pci_save_state(pdev);
+	return 0;
+}
+
+static int vmd_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct vmd_dev *vmd = pci_get_drvdata(pdev);
+	int err, i;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	pci_restore_state(pdev);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
+
+static const struct pci_device_id vmd_ids[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, vmd_ids);
+
+static struct pci_driver vmd_drv = {
+	.name		= "vmd",
+	.id_table	= vmd_ids,
+	.probe		= vmd_probe,
+	.remove		= vmd_remove,
+	.driver		= {
+		.pm	= &vmd_dev_pm_ops,
+	},
+};
+module_pci_driver(vmd_drv);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.6");