| From 21d86ac5a6c0ede443676cb455e988adcc0e3762 Mon Sep 17 00:00:00 2001 |
| From: Xiaowei Bao <xiaowei.bao@nxp.com> |
| Date: Sat, 5 Jan 2019 16:06:43 +0800 |
| Subject: [PATCH] PCI: mobiveil: Add the EP driver support |
| |
| Add EP mode support for the Mobiveil PCIe controller, based on the PCI |
| endpoint framework. The driver implements the pci_epc_ops callbacks on |
| top of new inbound/outbound window helpers added to the common |
| pcie-mobiveil code. |
| |
| Signed-off-by: Xiaowei Bao <xiaowei.bao@nxp.com> |
| [Zhiqiang: Correct the Copyright] |
| Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com> |
| --- |
| MAINTAINERS | 1 + |
| drivers/pci/controller/mobiveil/Kconfig | 5 + |
| drivers/pci/controller/mobiveil/Makefile | 1 + |
| drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c | 568 +++++++++++++++++++++ |
| drivers/pci/controller/mobiveil/pcie-mobiveil.c | 99 +++- |
| drivers/pci/controller/mobiveil/pcie-mobiveil.h | 68 +++ |
| 6 files changed, 734 insertions(+), 8 deletions(-) |
| create mode 100644 drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c |
| |
| --- a/MAINTAINERS |
| +++ b/MAINTAINERS |
| @@ -12500,6 +12500,7 @@ F: drivers/ntb/hw/mscc/ |
| PCI DRIVER FOR MOBIVEIL PCIE IP |
| M: Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in> |
| M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com> |
| +M: Xiaowei Bao <xiaowei.bao@nxp.com> |
| L: linux-pci@vger.kernel.org |
| S: Supported |
| F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt |
| --- a/drivers/pci/controller/mobiveil/Kconfig |
| +++ b/drivers/pci/controller/mobiveil/Kconfig |
| @@ -11,6 +11,11 @@ config PCIE_MOBIVEIL_HOST |
| depends on PCI_MSI_IRQ_DOMAIN |
| select PCIE_MOBIVEIL |
| |
| +config PCIE_MOBIVEIL_EP |
| + bool |
| + depends on PCI_ENDPOINT |
| + select PCIE_MOBIVEIL |
| + |
| config PCIE_MOBIVEIL_PLAT |
| bool "Mobiveil AXI PCIe controller" |
| depends on ARCH_ZYNQMP || COMPILE_TEST |
| --- a/drivers/pci/controller/mobiveil/Makefile |
| +++ b/drivers/pci/controller/mobiveil/Makefile |
| @@ -1,5 +1,6 @@ |
| # SPDX-License-Identifier: GPL-2.0 |
| obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o |
| obj-$(CONFIG_PCIE_MOBIVEIL_HOST) += pcie-mobiveil-host.o |
| +obj-$(CONFIG_PCIE_MOBIVEIL_EP) += pcie-mobiveil-ep.o |
| obj-$(CONFIG_PCIE_MOBIVEIL_PLAT) += pcie-mobiveil-plat.o |
| obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie-layerscape-gen4.o |
| --- /dev/null |
| +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-ep.c |
| @@ -0,0 +1,568 @@ |
| +// SPDX-License-Identifier: GPL-2.0 |
| +/* |
| + * Mobiveil PCIe Endpoint controller driver |
| + * |
| + * Copyright 2019 NXP |
| + * Author: Xiaowei Bao <xiaowei.bao@nxp.com> |
| + */ |
| + |
| +#include <linux/of.h> |
| +#include <linux/pci-epc.h> |
| +#include <linux/pci-epf.h> |
| +#include <linux/platform_device.h> |
| +#include "pcie-mobiveil.h" |
| + |
| +static void mobiveil_pcie_ep_func_select(struct mobiveil_pcie *pcie, u8 func_no) |
| +{ |
| + u32 func_num; |
| + |
| + /* |
| +	 * Select the config space of func_no by programming the function |
| +	 * number into the FUNC_SEL field of the PAB_CTRL register. |
| + */ |
| + func_num = csr_readl(pcie, PAB_CTRL); |
| + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT); |
| + func_num |= (func_no & FUNC_SEL_MASK) << FUNC_SEL_SHIFT; |
| + csr_writel(pcie, func_num, PAB_CTRL); |
| +} |
| + |
| +static void mobiveil_pcie_ep_func_deselect(struct mobiveil_pcie *pcie) |
| +{ |
| + u32 func_num; |
| + |
| + /* |
| +	 * Clear the FUNC_SEL field when accessing registers other than the |
| +	 * per-function config space registers. |
| + */ |
| + func_num = csr_readl(pcie, PAB_CTRL); |
| + func_num &= ~(FUNC_SEL_MASK << FUNC_SEL_SHIFT); |
| + csr_writel(pcie, func_num, PAB_CTRL); |
| +} |
| + |
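| +/* Reset the given GPEX BAR: select it and clear its size registers. */ |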
| +static void __mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie, u8 bar) |
| +{ |
| + csr_writel(pcie, bar, GPEX_BAR_SELECT); |
| + csr_writel(pcie, 0, GPEX_BAR_SIZE_LDW); |
| + csr_writel(pcie, 0, GPEX_BAR_SIZE_UDW); |
| +} |
| + |
| +void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pcie, u8 bar) |
| +{ |
| + __mobiveil_pcie_ep_reset_bar(pcie, bar); |
| +} |
| + |
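| +/* Recursively walk the capability list of func_no, starting at cap_ptr. */ |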
| +static u8 __mobiveil_pcie_ep_find_next_cap(struct mobiveil_pcie *pcie, |
| + u8 func_no, u8 cap_ptr, u8 cap) |
| +{ |
| + u8 cap_id, next_cap_ptr; |
| + u16 reg; |
| + |
| + if (!cap_ptr) |
| + return 0; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = csr_readw(pcie, cap_ptr); |
| + cap_id = (reg & 0x00ff); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + if (cap_id > PCI_CAP_ID_MAX) |
| + return 0; |
| + |
| + if (cap_id == cap) |
| + return cap_ptr; |
| + |
| + next_cap_ptr = (reg & 0xff00) >> 8; |
| + return __mobiveil_pcie_ep_find_next_cap(pcie, func_no, |
| + next_cap_ptr, cap); |
| +} |
| + |
| +static u8 mobiveil_pcie_ep_find_capability(struct mobiveil_pcie_ep *ep, |
| + u8 func_no, u8 cap) |
| +{ |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + u8 next_cap_ptr; |
| + u16 reg; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = csr_readw(pcie, PCI_CAPABILITY_LIST); |
| + next_cap_ptr = (reg & 0x00ff); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + return __mobiveil_pcie_ep_find_next_cap(pcie, func_no, |
| + next_cap_ptr, cap); |
| +} |
| + |
| +static int mobiveil_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, |
| + struct pci_epf_header *hdr) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + csr_writew(pcie, hdr->vendorid, PCI_VENDOR_ID); |
| + csr_writew(pcie, hdr->deviceid, PCI_DEVICE_ID); |
| + csr_writeb(pcie, hdr->revid, PCI_REVISION_ID); |
| + csr_writeb(pcie, hdr->progif_code, PCI_CLASS_PROG); |
| + csr_writew(pcie, hdr->subclass_code | hdr->baseclass_code << 8, |
| + PCI_CLASS_DEVICE); |
| + csr_writeb(pcie, hdr->cache_line_size, PCI_CACHE_LINE_SIZE); |
| + csr_writew(pcie, hdr->subsys_vendor_id, PCI_SUBSYSTEM_VENDOR_ID); |
| + csr_writew(pcie, hdr->subsys_id, PCI_SUBSYSTEM_ID); |
| + csr_writeb(pcie, hdr->interrupt_pin, PCI_INTERRUPT_PIN); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + return 0; |
| +} |
| + |
| +static void mobiveil_pcie_ep_inbound_win(struct mobiveil_pcie_ep *ep, |
| + u8 func_no, enum pci_barno bar, |
| + dma_addr_t cpu_addr) |
| +{ |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + |
| + program_ib_windows_ep(pcie, func_no, bar, cpu_addr); |
| +} |
| + |
| +static int mobiveil_pcie_ep_outbound_win(struct mobiveil_pcie_ep *ep, |
| + phys_addr_t phys_addr, |
| + u64 pci_addr, u8 func_no, |
| + size_t size) |
| +{ |
| + u32 free_win; |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + |
| + free_win = find_first_zero_bit(ep->apio_wins_map, ep->apio_wins); |
| + if (free_win >= ep->apio_wins) { |
| + dev_err(&pcie->pdev->dev, "No free outbound window\n"); |
| + return -EINVAL; |
| + } |
| + |
| + program_ob_windows_ep(pcie, func_no, free_win, phys_addr, |
| + pci_addr, MEM_WINDOW_TYPE, size); |
| + |
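| +	/* Record the CPU address so the window can be found at unmap time. */ |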
| + set_bit(free_win, ep->apio_wins_map); |
| + ep->apio_addr[free_win] = phys_addr; |
| + |
| + return 0; |
| +} |
| + |
| +static void mobiveil_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, |
| + struct pci_epf_bar *epf_bar) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + enum pci_barno bar = epf_bar->barno; |
| + |
| + if (bar < ep->bar_num) { |
| + __mobiveil_pcie_ep_reset_bar(pcie, func_no * ep->bar_num + bar); |
| + |
| + mobiveil_pcie_disable_ib_win_ep(pcie, func_no, bar); |
| + } |
| +} |
| + |
| +static int mobiveil_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, |
| + struct pci_epf_bar *epf_bar) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + enum pci_barno bar = epf_bar->barno; |
| + size_t size = epf_bar->size; |
| + |
| + if (bar < ep->bar_num) { |
| + mobiveil_pcie_ep_inbound_win(ep, func_no, bar, |
| + epf_bar->phys_addr); |
| + |
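| +		/* |
| +		 * The GPEX BAR size registers take the aperture size |
| +		 * encoded as ~(size - 1). |
| +		 */ |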
| + csr_writel(pcie, func_no * ep->bar_num + bar, |
| + GPEX_BAR_SELECT); |
| + csr_writel(pcie, lower_32_bits(~(size - 1)), |
| + GPEX_BAR_SIZE_LDW); |
| + csr_writel(pcie, upper_32_bits(~(size - 1)), |
| + GPEX_BAR_SIZE_UDW); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static int mobiveil_pcie_find_index(struct mobiveil_pcie_ep *ep, |
| + phys_addr_t addr, |
| + u32 *atu_index) |
| +{ |
| + u32 index; |
| + |
| + for (index = 0; index < ep->apio_wins; index++) { |
| + if (ep->apio_addr[index] != addr) |
| + continue; |
| + *atu_index = index; |
| + return 0; |
| + } |
| + |
| + return -EINVAL; |
| +} |
| + |
| +static void mobiveil_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, |
| + phys_addr_t addr) |
| +{ |
| + int ret; |
| + u32 atu_index; |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + |
| + ret = mobiveil_pcie_find_index(ep, addr, &atu_index); |
| + if (ret < 0) |
| + return; |
| + |
| + mobiveil_pcie_disable_ob_win(pcie, atu_index); |
| + clear_bit(atu_index, ep->apio_wins_map); |
| +} |
| + |
| +static int mobiveil_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, |
| + phys_addr_t addr, |
| + u64 pci_addr, size_t size) |
| +{ |
| + int ret; |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + |
| + ret = mobiveil_pcie_ep_outbound_win(ep, addr, pci_addr, func_no, size); |
| + if (ret) { |
| +		dev_err(&pcie->pdev->dev, "Failed to map address\n"); |
| + return ret; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static int mobiveil_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + u32 val, reg; |
| + u8 msi_cap; |
| + |
| + msi_cap = mobiveil_pcie_ep_find_capability(ep, func_no, |
| + PCI_CAP_ID_MSI); |
| + if (!msi_cap) |
| + return -EINVAL; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = msi_cap + PCI_MSI_FLAGS; |
| + val = csr_readw(pcie, reg); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + if (!(val & PCI_MSI_FLAGS_ENABLE)) |
| + return -EINVAL; |
| + |
| + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4; |
| + |
| + return val; |
| +} |
| + |
| +static int mobiveil_pcie_ep_set_msi(struct pci_epc *epc, |
| + u8 func_no, u8 interrupts) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + u32 val, reg; |
| + u8 msi_cap; |
| + |
| + msi_cap = mobiveil_pcie_ep_find_capability(ep, func_no, |
| + PCI_CAP_ID_MSI); |
| + if (!msi_cap) |
| + return -EINVAL; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = msi_cap + PCI_MSI_FLAGS; |
| + val = csr_readw(pcie, reg); |
| + val &= ~PCI_MSI_FLAGS_QMASK; |
| + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK; |
| + csr_writew(pcie, val, reg); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + return 0; |
| +} |
| + |
| +static int mobiveil_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + u32 val, reg; |
| + u8 msix_cap; |
| + |
| + msix_cap = mobiveil_pcie_ep_find_capability(ep, func_no, |
| + PCI_CAP_ID_MSIX); |
| + if (!msix_cap) |
| + return -EINVAL; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = msix_cap + PCI_MSIX_FLAGS; |
| + val = csr_readw(pcie, reg); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + if (!(val & PCI_MSIX_FLAGS_ENABLE)) |
| + return -EINVAL; |
| + |
| + val &= PCI_MSIX_FLAGS_QSIZE; |
| + |
| + return val; |
| +} |
| + |
| +static int mobiveil_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, |
| + u16 interrupts) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + u32 val, reg; |
| + u8 msix_cap; |
| + |
| + msix_cap = mobiveil_pcie_ep_find_capability(ep, func_no, |
| + PCI_CAP_ID_MSIX); |
| + if (!msix_cap) |
| + return -EINVAL; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = msix_cap + PCI_MSIX_FLAGS; |
| + val = csr_readw(pcie, reg); |
| + val &= ~PCI_MSIX_FLAGS_QSIZE; |
| + val |= interrupts; |
| + csr_writew(pcie, val, reg); |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
| + return 0; |
| +} |
| + |
| +static int mobiveil_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, |
| + enum pci_epc_irq_type type, |
| + u16 interrupt_num) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + |
| + if (!ep->ops->raise_irq) |
| + return -EINVAL; |
| + |
| + return ep->ops->raise_irq(ep, func_no, type, interrupt_num); |
| +} |
| + |
| +static const struct pci_epc_features* |
| +mobiveil_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) |
| +{ |
| + struct mobiveil_pcie_ep *ep = epc_get_drvdata(epc); |
| + |
| + if (!ep->ops->get_features) |
| + return NULL; |
| + |
| + return ep->ops->get_features(ep); |
| +} |
| + |
| +static const struct pci_epc_ops epc_ops = { |
| + .write_header = mobiveil_pcie_ep_write_header, |
| + .set_bar = mobiveil_pcie_ep_set_bar, |
| + .clear_bar = mobiveil_pcie_ep_clear_bar, |
| + .map_addr = mobiveil_pcie_ep_map_addr, |
| + .unmap_addr = mobiveil_pcie_ep_unmap_addr, |
| + .set_msi = mobiveil_pcie_ep_set_msi, |
| + .get_msi = mobiveil_pcie_ep_get_msi, |
| + .set_msix = mobiveil_pcie_ep_set_msix, |
| + .get_msix = mobiveil_pcie_ep_get_msix, |
| + .raise_irq = mobiveil_pcie_ep_raise_irq, |
| + .get_features = mobiveil_pcie_ep_get_features, |
| +}; |
| + |
| +int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no) |
| +{ |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + |
| + dev_err(&pcie->pdev->dev, "EP cannot trigger legacy IRQs\n"); |
| + |
| + return -EINVAL; |
| +} |
| + |
| +int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no, |
| + u8 interrupt_num) |
| +{ |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + struct pci_epc *epc = ep->epc; |
| + u16 msg_ctrl, msg_data; |
| + u32 msg_addr_lower, msg_addr_upper, reg; |
| + u64 msg_addr; |
| + bool has_upper; |
| + int ret; |
| + u8 msi_cap; |
| + |
| + msi_cap = mobiveil_pcie_ep_find_capability(ep, func_no, |
| + PCI_CAP_ID_MSI); |
| + if (!msi_cap) |
| + return -EINVAL; |
| + |
| + mobiveil_pcie_ep_func_select(pcie, func_no); |
| + |
| + reg = msi_cap + PCI_MSI_FLAGS; |
| + msg_ctrl = csr_readw(pcie, reg); |
| + has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); |
| + reg = msi_cap + PCI_MSI_ADDRESS_LO; |
| + msg_addr_lower = csr_readl(pcie, reg); |
| + if (has_upper) { |
| + reg = msi_cap + PCI_MSI_ADDRESS_HI; |
| + msg_addr_upper = csr_readl(pcie, reg); |
| + reg = msi_cap + PCI_MSI_DATA_64; |
| + msg_data = csr_readw(pcie, reg); |
| + } else { |
| + msg_addr_upper = 0; |
| + reg = msi_cap + PCI_MSI_DATA_32; |
| + msg_data = csr_readw(pcie, reg); |
| + } |
| + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
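| +	/* |
| +	 * Map the host-assigned MSI address through an outbound window and |
| +	 * write the message data to it to trigger the interrupt. |
| +	 */ |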
| + ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, |
| + msg_addr, epc->mem->page_size); |
| + if (ret) |
| + return ret; |
| + |
| + writel(msg_data | (interrupt_num - 1), ep->msi_mem); |
| + |
| + mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); |
| + |
| + return 0; |
| +} |
| + |
| +int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no, |
| + u16 interrupt_num) |
| +{ |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + struct pci_epc *epc = ep->epc; |
| + u32 msg_addr_upper, msg_addr_lower; |
| + u32 msg_data; |
| + u64 msg_addr; |
| + u8 msix_cap; |
| + int ret; |
| + |
| + msix_cap = mobiveil_pcie_ep_find_capability(ep, func_no, |
| + PCI_CAP_ID_MSIX); |
| + if (!msix_cap) |
| + return -EINVAL; |
| + |
| + mobiveil_pcie_ep_func_deselect(pcie); |
| + |
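| +	/* |
| +	 * Read the MSI-X entry address and data for this interrupt from |
| +	 * the MSI-X table/PBA access window. |
| +	 */ |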
| + msg_addr_lower = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS + |
| + PCI_MSIX_ENTRY_LOWER_ADDR + |
| + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE); |
| + msg_addr_upper = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS + |
| + PCI_MSIX_ENTRY_UPPER_ADDR + |
| + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE); |
| + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; |
| + msg_data = csr_readl(pcie, PAB_MSIX_TABLE_PBA_ACCESS + |
| + PCI_MSIX_ENTRY_DATA + |
| + (interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE); |
| + |
| + ret = mobiveil_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, |
| + msg_addr, epc->mem->page_size); |
| + if (ret) |
| + return ret; |
| + |
| + writel(msg_data, ep->msi_mem); |
| + |
| + mobiveil_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); |
| + |
| + return 0; |
| +} |
| + |
| +void mobiveil_pcie_ep_exit(struct mobiveil_pcie_ep *ep) |
| +{ |
| + struct pci_epc *epc = ep->epc; |
| + |
| + pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, |
| + epc->mem->page_size); |
| + |
| + pci_epc_mem_exit(epc); |
| +} |
| + |
| +int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep) |
| +{ |
| + int ret; |
| + void *addr; |
| + struct pci_epc *epc; |
| + struct mobiveil_pcie *pcie = to_mobiveil_pcie_from_ep(ep); |
| + struct device *dev = &pcie->pdev->dev; |
| + struct device_node *np = dev->of_node; |
| + |
| + if (!pcie->csr_axi_slave_base) { |
| + dev_err(dev, "csr_base is not populated\n"); |
| + return -EINVAL; |
| + } |
| + |
| + ret = of_property_read_u32(np, "apio-wins", &ep->apio_wins); |
| + if (ret < 0) { |
| + dev_err(dev, "Unable to read apio-wins property\n"); |
| + return ret; |
| + } |
| + |
| + if (ep->apio_wins > MAX_IATU_OUT) { |
| + dev_err(dev, "Invalid apio-wins\n"); |
| + return -EINVAL; |
| + } |
| + ep->apio_wins_map = devm_kcalloc(dev, |
| + BITS_TO_LONGS(ep->apio_wins), |
| + sizeof(long), |
| + GFP_KERNEL); |
| + if (!ep->apio_wins_map) |
| + return -ENOMEM; |
| + |
| + addr = devm_kcalloc(dev, ep->apio_wins, sizeof(phys_addr_t), |
| + GFP_KERNEL); |
| + if (!addr) |
| + return -ENOMEM; |
| + |
| + ep->apio_addr = addr; |
| + |
| + mobiveil_pcie_enable_bridge_pio(pcie); |
| + mobiveil_pcie_enable_engine_apio(pcie); |
| + mobiveil_pcie_enable_engine_ppio(pcie); |
| + mobiveil_pcie_enable_msi_ep(pcie); |
| + |
| + epc = devm_pci_epc_create(dev, &epc_ops); |
| + if (IS_ERR(epc)) { |
| + dev_err(dev, "Failed to create epc device\n"); |
| + return PTR_ERR(epc); |
| + } |
| + |
| + ep->epc = epc; |
| + epc_set_drvdata(epc, ep); |
| + |
| + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); |
| + if (ret < 0) |
| + epc->max_functions = 1; |
| + |
| + if (ep->ops->ep_init) |
| + ep->ops->ep_init(ep); |
| + |
| + ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, |
| + ep->page_size); |
| + if (ret < 0) { |
| + dev_err(dev, "Failed to initialize address space\n"); |
| + return ret; |
| + } |
| + |
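| +	/* |
| +	 * Reserve a page of EPC address space for mapping the MSI/MSI-X |
| +	 * target address when raising interrupts. |
| +	 */ |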
| + ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, |
| + epc->mem->page_size); |
| + if (!ep->msi_mem) { |
| + dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); |
| + return -ENOMEM; |
| + } |
| + |
| + return 0; |
| +} |
| --- a/drivers/pci/controller/mobiveil/pcie-mobiveil.c |
| +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.c |
| @@ -168,18 +168,12 @@ void program_ib_windows(struct mobiveil_ |
| /* |
| * routine to program the outbound windows |
| */ |
| -void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr, |
| - u64 pci_addr, u32 type, u64 size) |
| +void __program_ob_windows(struct mobiveil_pcie *pcie, u8 func_no, int win_num, |
| + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) |
| { |
| u32 value; |
| u64 size64 = ~(size - 1); |
| |
| - if (win_num >= pcie->apio_wins) { |
| - dev_err(&pcie->pdev->dev, |
| - "ERROR: max outbound windows reached !\n"); |
| - return; |
| - } |
| - |
| /* |
| * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit |
| * to 4 KB in PAB_AXI_AMAP_CTRL register |
| @@ -192,6 +186,7 @@ void program_ob_windows(struct mobiveil_ |
| |
| csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num)); |
| |
| + csr_writel(pcie, func_no, PAB_AXI_AMAP_PCI_HDR_PARAM(win_num)); |
| /* |
| * program AXI window base with appropriate value in |
| * PAB_AXI_AMAP_AXI_WIN0 register |
| @@ -205,10 +200,98 @@ void program_ob_windows(struct mobiveil_ |
| PAB_AXI_AMAP_PEX_WIN_L(win_num)); |
| csr_writel(pcie, upper_32_bits(pci_addr), |
| PAB_AXI_AMAP_PEX_WIN_H(win_num)); |
| +} |
| + |
| +void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, u64 cpu_addr, |
| + u64 pci_addr, u32 type, u64 size) |
| +{ |
| + if (win_num >= pcie->apio_wins) { |
| + dev_err(&pcie->pdev->dev, |
| + "ERROR: max outbound windows reached !\n"); |
| + return; |
| + } |
| + |
| + __program_ob_windows(pcie, 0, win_num, cpu_addr, |
| + pci_addr, type, size); |
| |
| pcie->ob_wins_configured++; |
| } |
| |
| +void program_ob_windows_ep(struct mobiveil_pcie *pcie, u8 func_no, int win_num, |
| + u64 cpu_addr, u64 pci_addr, u32 type, u64 size) |
| +{ |
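| +	/* |
| +	 * Window sizes are programmed as ~(size - 1), so round a |
| +	 * non-power-of-two size up to the next power of two. |
| +	 */ |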
| + if (size & (size - 1)) |
| + size = 1 << (1 + ilog2(size)); |
| + |
| + __program_ob_windows(pcie, func_no, win_num, cpu_addr, |
| + pci_addr, type, size); |
| +} |
| + |
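| +/* |
| + * Program an inbound window mapping the given BAR of func_no to the CPU |
| + * address 'phys'. |
| + */ |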
| +void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no, |
| + int bar, u64 phys) |
| +{ |
| + csr_writel(pcie, upper_32_bits(phys), |
| + PAB_EXT_PEX_BAR_AMAP(func_no, bar)); |
| + csr_writel(pcie, lower_32_bits(phys) | PEX_BAR_AMAP_EN, |
| + PAB_PEX_BAR_AMAP(func_no, bar)); |
| +} |
| + |
| +void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pcie, |
| + u8 func_no, u8 bar) |
| +{ |
| + u32 val; |
| + |
| + val = csr_readl(pcie, PAB_PEX_BAR_AMAP(func_no, bar)); |
| + val &= ~(1 << 0); |
| + csr_writel(pcie, val, PAB_PEX_BAR_AMAP(func_no, bar)); |
| +} |
| + |
| +void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num) |
| +{ |
| + u32 val; |
| + |
| + val = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); |
| + val &= ~(1 << WIN_ENABLE_SHIFT); |
| + csr_writel(pcie, val, PAB_AXI_AMAP_CTRL(win_num)); |
| +} |
| + |
| +void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pcie) |
| +{ |
| + u32 val; |
| + |
| + val = csr_readl(pcie, PAB_CTRL); |
| + val |= 1 << AMBA_PIO_ENABLE_SHIFT; |
| + val |= 1 << PEX_PIO_ENABLE_SHIFT; |
| + csr_writel(pcie, val, PAB_CTRL); |
| +} |
| + |
| +void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pcie) |
| +{ |
| + u32 val; |
| + |
| + val = csr_readl(pcie, PAB_AXI_PIO_CTRL); |
| + val |= APIO_EN_MASK; |
| + csr_writel(pcie, val, PAB_AXI_PIO_CTRL); |
| +} |
| + |
| +void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pcie) |
| +{ |
| + u32 val; |
| + |
| + val = csr_readl(pcie, PAB_PEX_PIO_CTRL); |
| + val |= 1 << PIO_ENABLE_SHIFT; |
| + csr_writel(pcie, val, PAB_PEX_PIO_CTRL); |
| +} |
| + |
| +void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pcie) |
| +{ |
| + u32 val; |
| + |
| + val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); |
| + val |= PAB_INTP_PAMR; |
| + csr_writel(pcie, val, PAB_INTP_AMBA_MISC_ENB); |
| +} |
| + |
| int mobiveil_bringup_link(struct mobiveil_pcie *pcie) |
| { |
| int retries; |
| --- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h |
| +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h |
| @@ -15,8 +15,12 @@ |
| #include <linux/pci.h> |
| #include <linux/irq.h> |
| #include <linux/msi.h> |
| +#include <linux/pci-epc.h> |
| +#include <linux/pci-epf.h> |
| + |
| #include "../../pci.h" |
| |
| +#define MAX_IATU_OUT 256 |
| /* register offsets and bit positions */ |
| |
| /* |
| @@ -42,6 +46,9 @@ |
| #define PAGE_SEL_MASK 0x3f |
| #define PAGE_LO_MASK 0x3ff |
| #define PAGE_SEL_OFFSET_SHIFT 10 |
| +#define FUNC_SEL_SHIFT 19 |
| +#define FUNC_SEL_MASK 0x1ff |
| +#define MSI_SW_CTRL_EN BIT(29) |
| |
| #define PAB_ACTIVITY_STAT 0x81c |
| |
| @@ -52,6 +59,7 @@ |
| #define PIO_ENABLE_SHIFT 0 |
| |
| #define PAB_INTP_AMBA_MISC_ENB 0x0b0c |
| +#define PAB_INTP_PAMR BIT(0) |
| #define PAB_INTP_AMBA_MISC_STAT 0x0b1c |
| #define PAB_INTP_RESET BIT(1) |
| #define PAB_INTP_MSI BIT(3) |
| @@ -72,6 +80,8 @@ |
| #define WIN_TYPE_MASK 0x3 |
| #define WIN_SIZE_MASK 0xfffffc00 |
| |
| +#define PAB_AXI_AMAP_PCI_HDR_PARAM(win) PAB_EXT_REG_ADDR(0x5ba0, win) |
| + |
| #define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) |
| |
| #define PAB_EXT_AXI_AMAP_AXI_WIN(win) PAB_EXT_REG_ADDR(0x80a0, win) |
| @@ -101,6 +111,18 @@ |
| #define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) |
| #define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) |
| |
| +/* PPIO WINs EP mode */ |
| +#define PAB_PEX_BAR_AMAP(func, bar) (0x1ba0 + 0x20 * func + 4 * bar) |
| +#define PAB_EXT_PEX_BAR_AMAP(func, bar) (0x84a0 + 0x20 * func + 4 * bar) |
| +#define PEX_BAR_AMAP_EN BIT(0) |
| + |
| +#define PAB_MSIX_TABLE_PBA_ACCESS 0xD000 |
| + |
| +#define GPEX_BAR_ENABLE 0x4D4 |
| +#define GPEX_BAR_SIZE_LDW 0x4D8 |
| +#define GPEX_BAR_SIZE_UDW 0x4DC |
| +#define GPEX_BAR_SELECT 0x4E0 |
| + |
| /* starting offset of INTX bits in status register */ |
| #define PAB_INTX_START 5 |
| |
| @@ -138,6 +160,7 @@ |
| ((off >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK) |
| |
| struct mobiveil_pcie; |
| +struct mobiveil_pcie_ep; |
| |
| struct mobiveil_msi { /* MSI information */ |
| struct mutex lock; /* protect bitmap variable */ |
| @@ -170,6 +193,28 @@ struct mobiveil_pab_ops { |
| int (*host_init)(struct mobiveil_pcie *pcie); |
| }; |
| |
| +struct mobiveil_pcie_ep_ops { |
| + void (*ep_init)(struct mobiveil_pcie_ep *ep); |
| + int (*raise_irq)(struct mobiveil_pcie_ep *ep, u8 func_no, |
| + enum pci_epc_irq_type type, u16 interrupt_num); |
| + const struct pci_epc_features* (*get_features) |
| + (struct mobiveil_pcie_ep *ep); |
| +}; |
| + |
| +struct mobiveil_pcie_ep { |
| + struct pci_epc *epc; |
| + const struct mobiveil_pcie_ep_ops *ops; |
| + phys_addr_t phys_base; |
| + size_t addr_size; |
| + size_t page_size; |
| + phys_addr_t *apio_addr; |
| + unsigned long *apio_wins_map; |
| + u32 apio_wins; |
| + void __iomem *msi_mem; |
| + phys_addr_t msi_mem_phys; |
| + u8 bar_num; |
| +}; |
| + |
| struct mobiveil_pcie { |
| struct platform_device *pdev; |
| struct list_head *resources; |
| @@ -183,8 +228,12 @@ struct mobiveil_pcie { |
| const struct mobiveil_pab_ops *ops; |
| struct root_port rp; |
| struct pci_host_bridge *bridge; |
| + struct mobiveil_pcie_ep ep; |
| }; |
| |
| +#define to_mobiveil_pcie_from_ep(endpoint) \ |
| + container_of((endpoint), struct mobiveil_pcie, ep) |
| + |
| int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie); |
| int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit); |
| bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie); |
| @@ -226,4 +275,23 @@ static inline void csr_writeb(struct mob |
| csr_write(pcie, val, off, 0x1); |
| } |
| |
| +void program_ib_windows_ep(struct mobiveil_pcie *pcie, u8 func_no, |
| + int bar, u64 phys); |
| +void program_ob_windows_ep(struct mobiveil_pcie *pcie, u8 func_num, int win_num, |
| + u64 cpu_addr, u64 pci_addr, u32 type, u64 size); |
| +void mobiveil_pcie_disable_ib_win_ep(struct mobiveil_pcie *pci, |
| + u8 func_no, u8 bar); |
| +void mobiveil_pcie_disable_ob_win(struct mobiveil_pcie *pcie, int win_num); |
| +int mobiveil_pcie_ep_init(struct mobiveil_pcie_ep *ep); |
| +int mobiveil_pcie_ep_raise_legacy_irq(struct mobiveil_pcie_ep *ep, u8 func_no); |
| +int mobiveil_pcie_ep_raise_msi_irq(struct mobiveil_pcie_ep *ep, u8 func_no, |
| + u8 interrupt_num); |
| +int mobiveil_pcie_ep_raise_msix_irq(struct mobiveil_pcie_ep *ep, u8 func_no, |
| + u16 interrupt_num); |
| +void mobiveil_pcie_ep_reset_bar(struct mobiveil_pcie *pci, u8 bar); |
| +u8 mobiveil_pcie_ep_get_bar_num(struct mobiveil_pcie_ep *ep, u8 func_no); |
| +void mobiveil_pcie_enable_bridge_pio(struct mobiveil_pcie *pci); |
| +void mobiveil_pcie_enable_engine_apio(struct mobiveil_pcie *pci); |
| +void mobiveil_pcie_enable_engine_ppio(struct mobiveil_pcie *pci); |
| +void mobiveil_pcie_enable_msi_ep(struct mobiveil_pcie *pci); |
| #endif /* _PCIE_MOBIVEIL_H */ |