blob: a165ea54318afabab2a12db8865b841ecd7b2a89 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
* ASR PCIe host controller driver.
*
* Copyright (c) 2021 ASRMicro Inc.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/pm_qos.h>
#include <linux/cputype.h>
#include <soc/asr/regs-addr.h>
#include "../pci.h"
#include "pcie-falcon.h"
/* PERST# timing delay bounds for usleep_range(), in microseconds */
#define REF_PERST_MIN 20000
#define REF_PERST_MAX 25000
#define PERST_ACCESS_MIN 10000
#define PERST_ACCESS_MAX 12000
struct falcon_pcie_port;

/* Per-SoC match data: the config-space access ops installed on the bridge. */
struct falcon_pcie_soc {
	struct pci_ops *ops;
};
/* Per-root-port state (this driver handles a single port). */
struct falcon_pcie_port {
	void __iomem *base;		/* mapped controller registers ("pciectrl") */
	void __iomem *phy_base;		/* NOTE(review): stores the PHY's physical
					 * address cast to a pointer, never ioremapped */
	struct list_head list;
	struct falcon_pcie *pcie;	/* back-pointer to the host controller */
	struct clk *axi_ck;
	struct phy *phy;
	u32 slot;			/* root-port slot matched in find_port() */
	int irq;			/* interrupt number read from DT "interrupts" */
	struct mutex lock;		/* serializes config-space select + access */
	s32 lpm_qos;			/* DT "lpm-qos" value used while link active */
	int gpio_reset;			/* PERST# GPIO from DT "reset-gpios" */
	bool suspended;			/* config accessors bail out while true */
};
#ifdef CONFIG_PCI_MSI
/* One 32-bit MSI status register => 32 vectors. */
#define INT_PCI_MSI_NR 32

/* MSI state: vector bitmap, IRQ domain and the legacy msi_controller. */
struct falcon_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated hwirq vectors */
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;	/* NOTE(review): not referenced in this file */
	struct mutex lock;	/* protects @used */
	int irq;
};
#endif
/* Driver state for one Falcon PCIe host controller instance. */
struct falcon_pcie {
	struct device *dev;
	struct phy *phy;
	struct falcon_pcie_port *port;	/* the single root port */
	struct resource mem;		/* memory window copied from DT ranges */
	const struct falcon_pcie_soc *soc;
	unsigned int busnr;		/* root bus number */
	struct pci_host_bridge *host;
	struct pm_qos_request qos_idle;	/* cpuidle constraint while link is up */
#ifdef CONFIG_PCI_MSI
	struct falcon_msi msi;
#endif
};
#ifdef CONFIG_PCI_MSI
/* Map the generic msi_controller back to its containing falcon_msi. */
static inline struct falcon_msi *to_falcon_msi(struct msi_controller *chip)
{
	return container_of(chip, struct falcon_msi, chip);
}	/* dropped stray ';' that followed the function body */
#endif
/*
 * Drive the endpoint PERST# GPIO to @on (1 via falcon_preset_assert(),
 * 0 via falcon_preset_deassert()); effective PERST# polarity depends on
 * board wiring -- NOTE(review): confirm against schematics.
 *
 * The GPIO is requested and freed around each toggle so it is never held
 * permanently; the sleeps bracket the access per PERST timing requirements.
 */
static void falcon_reset(struct falcon_pcie_port *port, u8 on)
{
	int ret;

	if (!gpio_request(port->gpio_reset, "pcie_perst")) {
		usleep_range(REF_PERST_MIN, REF_PERST_MAX);
		ret = gpio_direction_output(port->gpio_reset, on);
		gpio_free(port->gpio_reset);
		if (ret)
			pr_err("Falcon perst gpio set output failed %d.\n",
			       port->gpio_reset);
		usleep_range(PERST_ACCESS_MIN, PERST_ACCESS_MAX);
	} else {
		pr_err("Falcon perst gpio request failed %d.\n",
		       port->gpio_reset);
	}
}
static void falcon_preset_assert(struct falcon_pcie_port *port)
{
falcon_reset(port, 1);
}
/* Drive the reset GPIO to 0. */
static void falcon_preset_deassert(struct falcon_pcie_port *port)
{
	falcon_reset(port, 0);
}
/*
 * Extract a 1/2/4-byte value from the 32-bit config register at @addr.
 * @where selects the byte lane inside the dword. Returns PCIBIOS codes.
 */
int cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
	u32 dword = readl(addr);

	switch (size) {
	case 1:
		*val = (dword >> (8 * (where & 3))) & 0xff;
		break;
	case 2:
		*val = (dword >> (8 * (where & 3))) & 0xffff;
		break;
	case 4:
		*val = dword;
		break;
	default:
		/* like the original: *val already holds the raw dword */
		*val = dword;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Store a 1/2/4-byte value into the config dword at @addr; @where picks
 * the byte/word lane for sub-dword writes. Returns PCIBIOS codes.
 */
int cfg_write(void __iomem *addr, int where, int size, u32 val)
{
	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr + (where & 2));
		break;
	case 1:
		writeb(val, addr + (where & 3));
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Drop the runtime-PM reference taken at powerup and disable runtime PM. */
static void falcon_pcie_subsys_powerdown(struct falcon_pcie *pcie)
{
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
}
/* Unmap the port registers, unlink it, and release its devm allocation. */
static void falcon_pcie_port_free(struct falcon_pcie_port *port)
{
	struct device *dev = port->pcie->dev;

	devm_iounmap(dev, port->base);
	list_del(&port->list);
	devm_kfree(dev, port);
}
/* Full teardown: PHY off, clock gated, port freed, runtime PM dropped. */
static void falcon_pcie_put_resources(struct falcon_pcie *pcie)
{
	struct falcon_pcie_port *p = pcie->port;

	phy_power_off(p->phy);
	phy_exit(p->phy);
	clk_disable_unprepare(p->axi_ck);
	falcon_pcie_port_free(p);
	falcon_pcie_subsys_powerdown(pcie);
}
/*
 * Read config space of bus 0 (RC bridge) or bus 1 (endpoint); bit 8 of
 * PCIE_CFGNUM selects which one. Returns a PCIBIOS_* code, or -1 while
 * the controller is suspended.
 *
 * Fixes vs. original: cfg_read()'s result was ignored and the function
 * always returned PCIBIOS_SUCCESSFUL, leaving *val uninitialized for
 * unhandled buses/functions. Now *val defaults to all-ones (master-abort
 * convention) and the real status is propagated.
 */
static int falcon_pcie_hw_rd_cfg(struct falcon_pcie_port *port, u32 bus, u32 devfn,
				 int where, int size, u32 *val)
{
	int ret = PCIBIOS_DEVICE_NOT_FOUND;
	u32 value;

	if (port->suspended)
		return -1;

	*val = ~0;

	mutex_lock(&port->lock);
	if (PCI_FUNC(devfn) == 0) {
		value = readl(port->base + PCIE_CFGNUM);
		if (bus == 1) {
			/* bit 8 set: access the endpoint's config space */
			writel((value | (0x1 << 8)), port->base + PCIE_CFGNUM);
			ret = cfg_read(port->base + FALCON_PCIE_CONFIG_OFFSET
				       + (where & ~0x3), where, size, val);
		} else if (bus == 0) {
			/* bit 8 clear: access the RC bridge's config space */
			writel((value & (~(0x1 << 8))), port->base + PCIE_CFGNUM);
			ret = cfg_read(port->base + FALCON_PCIE_CONFIG_OFFSET
				       + (where & ~0x3), where, size, val);
		}
	}
	mutex_unlock(&port->lock);

	return ret;
}
/*
 * Write config space of bus 0 (RC bridge) or bus 1 (endpoint), selected via
 * bit 8 of PCIE_CFGNUM. Writes to CFG_BAR0_REG/CFG_BAR1_REG are deliberately
 * dropped: those BARs route requests to bridge config space and are fixed at
 * init (see falcon_pcie_enable_port()). Returns a PCIBIOS_* code, or -1
 * while the controller is suspended.
 *
 * Fix vs. original: ret was returned uninitialized on all filtered paths
 * (BAR writes, non-zero functions, buses > 1); it now defaults to success
 * so silently-dropped writes do not report random errors.
 */
static int falcon_pcie_hw_wr_cfg(struct falcon_pcie_port *port, u32 bus, u32 devfn,
				 int where, int size, u32 val)
{
	int ret = PCIBIOS_SUCCESSFUL;
	u32 value;

	if (port->suspended)
		return -1;

	mutex_lock(&port->lock);
	if (PCI_FUNC(devfn) == 0) {
		if (bus == 1) {
			if ((where != CFG_BAR0_REG) && (where != CFG_BAR1_REG)) {
				value = readl(port->base + PCIE_CFGNUM);
				writel((value | (0x1 << 8)), port->base + PCIE_CFGNUM);
				ret = cfg_write(port->base + FALCON_PCIE_CONFIG_OFFSET
						+ (where & ~0x3), where, size, val);
			}
		} else if (bus == 0) {
			/* avoid CFG_BAR0_REG/CFG_BAR1_REG to be overwritten later */
			if ((where != CFG_BAR0_REG) && (where != CFG_BAR1_REG)) {
				value = readl(port->base + PCIE_CFGNUM);
				writel((value & (~(0x1 << 8))), port->base + PCIE_CFGNUM);
				ret = cfg_write(port->base + FALCON_PCIE_CONFIG_OFFSET
						+ (where & ~0x3), where, size, val);
			}
		}
	}
	mutex_unlock(&port->lock);

	return ret;
}
/*
 * Resolve the root port owning @bus/@devfn by walking up to the root bus;
 * the devfn seen on the root bus identifies the slot.
 */
static struct falcon_pcie_port *falcon_pcie_find_port(struct pci_bus *bus,
						      unsigned int devfn)
{
	struct falcon_pcie *pcie = bus->sysdata;
	struct falcon_pcie_port *port = pcie->port;

	while (bus && bus->number) {
		struct pci_dev *bridge = bus->self;

		devfn = bridge->devfn;
		bus = bridge->bus;
	}

	return (port->slot == PCI_SLOT(devfn)) ? port : NULL;
}
/* pci_ops::read - route a config read to the owning port's accessor. */
static int falcon_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct falcon_pcie_port *port = falcon_pcie_find_port(bus, devfn);
	int ret;

	if (!port) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	ret = falcon_pcie_hw_rd_cfg(port, bus->number, devfn, where, size, val);
	if (ret)
		*val = ~0;

	return ret;
}
/* pci_ops::write - route a config write to the owning port's accessor. */
static int falcon_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct falcon_pcie_port *port = falcon_pcie_find_port(bus, devfn);

	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return falcon_pcie_hw_wr_cfg(port, bus->number, devfn, where, size, val);
}
/* Config accessors installed on the host bridge via falcon_pcie_soc. */
static struct pci_ops falcon_pcie_ops = {
	.read = falcon_pcie_config_read,
	.write = falcon_pcie_config_write,
};
#ifndef CONFIG_PCI_MSI
/*
 * Legacy interrupt handler, used only when MSI support is compiled out.
 * Any pending local-interrupt bit flags a 2 s wakeup event; the status is
 * then re-read and written back to acknowledge everything pending.
 */
static irqreturn_t falcon_pcie_irq_handler(int irq, void *arg)
{
	struct falcon_pcie_port *pp = arg;
	u32 val;

	val = readl(pp->base + XR3PCI_LOCAL_INT_STATUS);
	if (val != 0) {
		pm_wakeup_event(pp->pcie->dev, 2000);
		/* re-read so the ack also covers bits raised since the first read */
		val = readl(pp->base + XR3PCI_LOCAL_INT_STATUS);
		writel(val, pp->base + XR3PCI_LOCAL_INT_STATUS);
	}
	return IRQ_HANDLED;
}
#endif
#ifdef CONFIG_PCI_MSI
/* Reserve one free MSI vector; returns its hwirq number or -ENOSPC. */
static int falcon_msi_alloc(struct falcon_msi *chip)
{
	int hwirq;

	mutex_lock(&chip->lock);
	hwirq = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (hwirq >= INT_PCI_MSI_NR)
		hwirq = -ENOSPC;
	else
		set_bit(hwirq, chip->used);
	mutex_unlock(&chip->lock);

	return hwirq;
}
/*
 * Reserve a naturally-aligned power-of-two block covering @no_irqs vectors.
 * Returns the first hwirq of the block, or a negative errno.
 */
static int falcon_msi_alloc_region(struct falcon_msi *chip, int no_irqs)
{
	int first;

	mutex_lock(&chip->lock);
	first = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
					order_base_2(no_irqs));
	mutex_unlock(&chip->lock);

	return first;
}
/* Return a single vector (@irq is the hwirq number) to the pool. */
static void falcon_msi_free(struct falcon_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}
static irqreturn_t falcon_pcie_msi_irq(int irq, void *data)
{
struct falcon_pcie *pcie = data;
struct falcon_msi *msi = &pcie->msi;
struct device *dev = pcie->dev;
unsigned int reg, val;
val = readl(pcie->port->base + XR3PCI_LOCAL_INT_STATUS);
if (val != 0)
pm_wakeup_event(pcie->dev, 2000);
reg = (u32)readl(pcie->port->base + XR3PCI_MSI_INT_STATUS);
if (!reg)
return IRQ_NONE;
while (reg) {
unsigned int index = find_first_bit((long unsigned int *)&reg, 32);
unsigned int msi_irq;
/* clear the interrupt */
writel(1 << index, pcie->port->base + XR3PCI_MSI_INT_STATUS);
msi_irq = irq_find_mapping(msi->domain, index);
if (msi_irq) {
if (test_bit(index, msi->used))
generic_handle_irq(msi_irq);
else
dev_info(dev, "unhandled MSI\n");
} else {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
reg = (u32)readl(pcie->port->base + XR3PCI_MSI_INT_STATUS);
}
writel(val, pcie->port->base + XR3PCI_LOCAL_INT_STATUS);
return IRQ_HANDLED;
}
/*
 * msi_controller::setup_irq - allocate one MSI vector and program @desc.
 * The message address is BAR0_ADDR OR'ed with the value read back from
 * IMSI_ADDR (the endpoint raises the MSI by writing there); the payload
 * is the hwirq index, matching the bit checked in falcon_pcie_msi_irq().
 */
static int falcon_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
				struct msi_desc *desc)
{
	struct falcon_msi *msi = to_falcon_msi(chip);
	struct falcon_pcie *pcie = container_of(chip, struct falcon_pcie, msi.chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;
	u64 msi_target;

	hwirq = falcon_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	/* mappings were pre-created in falcon_pcie_enable_msi() */
	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		falcon_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msi_target = (u64)readl(pcie->port->base + IMSI_ADDR);
	/* EP need write to BAR0_ADDR + IMSI_ADDR to trigger MSI */
	msi_target |= BAR0_ADDR;
	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
	msg.data = hwirq;
	pci_write_msi_msg(irq, &msg);

	return 0;
}
/*
 * msi_controller::setup_irqs - multi-MSI allocation (power-of-two block of
 * @nvec vectors). MSI-X is not supported.
 *
 * Fix vs. original: the reserved bitmap region was leaked when
 * irq_find_mapping() or irq_set_msi_desc_off() failed; it is now released
 * on those error paths.
 */
static int falcon_msi_setup_irqs(struct msi_controller *chip,
				 struct pci_dev *pdev, int nvec, int type)
{
	struct falcon_pcie *pcie = container_of(chip, struct falcon_pcie, msi.chip);
	struct falcon_msi *msi = to_falcon_msi(chip);
	struct msi_desc *desc;
	struct msi_msg msg;
	u64 msi_target;
	unsigned int irq;
	int hwirq;
	int ret;
	int i;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	hwirq = falcon_msi_alloc_region(msi, nvec);
	if (hwirq < 0)
		return -ENOSPC;

	irq = irq_find_mapping(msi->domain, hwirq);
	if (!irq) {
		ret = -ENOSPC;
		goto err_free_region;
	}

	for (i = 0; i < nvec; i++) {
		/*
		 * irq_create_mapping() called from falcon_pcie_probe() pre-
		 * allocates descs, so there is no need to allocate descs here.
		 * We can therefore assume that if irq_find_mapping() above
		 * returns non-zero, then the descs are also successfully
		 * allocated.
		 */
		if (irq_set_msi_desc_off(irq, i, desc)) {
			ret = -EINVAL;
			goto err_free_region;
		}
	}

	desc->nvec_used = nvec;
	desc->msi_attrib.multiple = order_base_2(nvec);

	msi_target = (u64)readl(pcie->port->base + IMSI_ADDR);
	/* EP need write to BAR0_ADDR + IMSI_ADDR to trigger MSI */
	msi_target |= BAR0_ADDR;
	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
	msg.data = hwirq;
	pci_write_msi_msg(irq, &msg);

	return 0;

err_free_region:
	mutex_lock(&msi->lock);
	bitmap_release_region(msi->used, hwirq, order_base_2(nvec));
	mutex_unlock(&msi->lock);
	return ret;
}
/* msi_controller::teardown_irq - release the hwirq backing virq @irq. */
static void falcon_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct falcon_msi *msi = to_falcon_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	falcon_msi_free(msi, d->hwirq);
}
/* irq_chip for MSI vectors; mask/unmask delegate to the PCI MSI helpers. */
static struct irq_chip falcon_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
/* irq_domain map: attach each new virq to the MSI chip with simple-irq flow. */
static int falcon_msi_map(struct irq_domain *domain, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &falcon_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	return 0;
}
/* Linear MSI domain: only a map callback is needed. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = falcon_msi_map,
};
/* Dispose every per-vector mapping, then tear down the MSI IRQ domain. */
static void falcon_pcie_unmap_msi(struct falcon_pcie *pcie)
{
	struct falcon_msi *msi = &pcie->msi;
	unsigned int hw;

	for (hw = 0; hw < INT_PCI_MSI_NR; hw++) {
		int virq = irq_find_mapping(msi->domain, hw);

		if (virq > 0)
			irq_dispose_mapping(virq);
	}
	irq_domain_remove(msi->domain);
}
/*
 * Set up MSI support: legacy msi_controller callbacks, a linear IRQ domain
 * with all vector mappings pre-created, the summary interrupt handler, and
 * finally hardware-side ack + unmask of all MSI sources.
 */
static int falcon_pcie_enable_msi(struct falcon_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct falcon_msi *msi = &pcie->msi;
	int err, i;

	mutex_init(&msi->lock);
	msi->chip.dev = dev;
	msi->chip.setup_irq = falcon_msi_setup_irq;
	msi->chip.setup_irqs = falcon_msi_setup_irqs;
	msi->chip.teardown_irq = falcon_msi_teardown_irq;
	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	/* pre-create mappings so setup_irq(s) can rely on irq_find_mapping() */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq_create_mapping(msi->domain, i);
	}
	/* +16: same hwirq-to-Linux-IRQ offset applied in falcon_pcie_parse_port()
	 * for the non-MSI path -- NOTE(review): confirm this fixed offset */
	err = devm_request_irq(dev, pcie->port->irq + 16, falcon_pcie_msi_irq,
			       IRQF_SHARED, falcon_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}
	/* clear all interrupts */
	writel(0xffffffff, pcie->port->base + XR3PCI_LOCAL_INT_STATUS);
	/* enable all MSI interrupts */
	writel(XR3PCI_INT_MSI, pcie->port->base + XR3PCI_LOCAL_INT_MASK);
	return 0;
err:
	falcon_pcie_unmap_msi(pcie);
	return err;
}
#endif
#ifndef CONFIG_PCI_MSI
/* Legacy mode: ack everything pending, then unmask only the INTx sources. */
static int falcon_enable_interrupt(struct falcon_pcie_port *port)
{
	/* clear all interrupts */
	writel(0xffffffff, port->base + XR3PCI_LOCAL_INT_STATUS);
	/* Enable legacy interrupts */
	writel(XR3PCI_INT_INTx, port->base + XR3PCI_LOCAL_INT_MASK);
	return 0;
}
#endif
/*
 * Program one address-translation (ATR) table entry at @base.
 * SRC_ADDR_LOW packs: bit 0 = enable, bits 1-6 = window size exponent,
 * upper bits = source address. NOTE(review): the original comment read
 * "2^window_size + 1", which likely meant a 2^(window_size + 1)-byte
 * window; confirm against the PLDA XpressRICH3 databook.
 * Only 32-bit addresses are used; the high words are forced to zero.
 */
static void falcon_update_atr_entry(void __iomem *base,
		resource_size_t src_addr, resource_size_t trsl_addr,
		int trsl_param, int window_size)
{
	writel(src_addr | (window_size<<1) | 0x1, base + XR3PCI_ATR_SRC_ADDR_LOW);
	writel(0, base + XR3PCI_ATR_SRC_ADDR_HIGH);
	writel(trsl_addr, base + XR3PCI_ATR_TRSL_ADDR_LOW);
	writel(0, base + XR3PCI_ATR_TRSL_ADDR_HIGH);
	writel(trsl_param, base + XR3PCI_ATR_TRSL_PARAM);
}
/*
 * Program the address-translation tables, enable bus master/memory/IO,
 * start LTSSM (ASR1903 A0+), and wait up to 100 ms for link training.
 * Returns 0 on link up, -EIO when no link is negotiated.
 *
 * Fixes vs. original: src_addr could be used uninitialized when neither
 * register-base pattern matched (now defaults to 0); the (int) cast of
 * port->base truncated the pointer on 64-bit builds (now unsigned long);
 * the link-up message used a backslash continuation inside the string
 * literal, embedding raw indentation in the log output.
 */
static int falcon_pcie_startup_port(struct falcon_pcie_port *port)
{
	void __iomem *table_base;
	resource_size_t src_addr = 0;
	unsigned long ddr_size_mb = get_num_physpages() >> (20 - PAGE_SHIFT);
	unsigned long address;
	int err, i = 0;
	u32 val;

	/* Address translation from CPU to PCIe */
	table_base = port->base + XR3PCI_ATR_AXI4_SLV0;
	/*
	 * The controller instance is identified by the low 20 bits of its
	 * register address; each instance gets its own outbound window base.
	 */
	address = (unsigned long)port->base;
	if ((address & 0xfffff) == 0x88000)
		src_addr = (0xe0000000 & 0xFF000000) + 0x100000;
	if ((address & 0xfffff) == 0x8c000)
		src_addr = (0xd8000000 & 0xFF000000) + 0x100000;

	/* map slave if to PCIe IF */
	falcon_update_atr_entry(table_base,
			src_addr, src_addr, XR3PCI_ATR_TRSLID_PCIE_CONF, 0x19);

	/* map DDR addr range; i ends up as floor(log2(ddr_size_mb)) */
	while (ddr_size_mb >>= 1)
		i++;
	falcon_update_atr_entry(port->base + XR3PCI_ATR_PCIE_WIN0 + 0x20,
			0, 0, XR3PCI_ATR_TRSLID_AXIMEMORY, (20 + i - 1));
	pr_debug("%s: log(2) of ddr size = %d\n", __func__, i);

	/* disable the other ATU windows */
	for (i = XR3PCI_ATR_PCIE_WIN0 + 0x40; i < XR3PCI_ATR_AXI4_SLV0; i += 0x20)
		falcon_update_atr_entry(port->base + i,
				0, 0, XR3PCI_ATR_TRSLID_AXIMEMORY, 0);

	/* register1 bus enable: IO space, memory space, bus master */
	val = readl(port->base + CFG_STATUS_COMMAND);
	writel(val | 0x7, port->base + CFG_STATUS_COMMAND);
	/* PCIE_CFGNUM Bus Number, Pri=0, Sec=1, Sub=01 */
	writel(0x1<<16 | 0x1<<8, port->base + CFG_SUBSEC_PRIM);
#ifdef CONFIG_CPU_ASR1903
	if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
		val = readl(port->base + PCIE_CFGCTRL);
		val &= ~(0x1<<2); /* clear ltssm disable bit, enable ltssm */
		writel(val, port->base + PCIE_CFGCTRL);
	}
#endif
	err = readl_poll_timeout_atomic(port->base + XR3PCI_BASIC_STATUS, val,
			(val & XR3PCI_BS_LINK_MASK), 10, 100 * USEC_PER_MSEC);
	if (err) {
		pr_info(DEVICE_NAME ": No link negotiated\n");
		return -EIO;
	}
	mdelay(1);

	val = readl(port->base + XR3PCI_BASIC_STATUS);
	pr_info(DEVICE_NAME " %dx link negotiated (gen %d), maxpayload %d maxreqsize %d\n",
		val & XR3PCI_BS_LINK_MASK, (val & XR3PCI_BS_GEN_MASK) >> 8,
		2 << (6 + ((val & XR3PCI_BS_NEG_PAYLOAD_MASK) >> 24)),
		2 << (6 + ((val & XR3PCI_BS_NEG_REQSIZE_MASK) >> 28)));

#ifndef CONFIG_PCI_MSI
	falcon_enable_interrupt(port);
#endif
	return 0;
}
/*
 * Bring the port up: AXI clock, APMU reset release (ASR1903 A0+), PHY init,
 * endpoint PERST toggle, payload/BAR configuration, then link training via
 * falcon_pcie_startup_port(). On failure the port structure is freed, so
 * the caller must not touch it afterwards.
 */
static int falcon_pcie_enable_port(struct falcon_pcie_port *port)
{
	int err;
	u32 val;

	err = clk_prepare_enable(port->axi_ck);
	if (err) {
		pr_info("failed to enable axi_ck%d\n", port->slot);
		goto err_axi_clk;
	}
#ifdef CONFIG_CPU_ASR1903
	if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
		/* set bit 27 in the APMU PCIe clock/reset control register */
		val = readl(regs_addr_get_va(REGS_ADDR_APMU) + APMU_PCIE_CLK_RST_CTRL);
		writel(val | (1<<27), regs_addr_get_va(REGS_ADDR_APMU) + APMU_PCIE_CLK_RST_CTRL);
	}
#endif
	err = phy_init(port->phy);
	if (err) {
		pr_info("failed to initialize port%d phy\n", port->slot);
		goto err_phy_init;
	}
	/* perst assert Endpoint */
	falcon_preset_assert(port);

	val = readl(port->base + CFG_DEVICE_CTRL);
	/* max payload size 256, max read request size 256 */
	writel((val&0xffff9f3f), port->base + CFG_DEVICE_CTRL);
	val = readl(port->base + CFG_DEVICE_CAP);
	writel((val&0xffffff18), port->base + CFG_DEVICE_CAP);
#ifdef CONFIG_CPU_ASR1903
	if (cpu_is_asr1903() && (!cpu_is_asr1903_z1())) {
		writel(0xffffffff, port->base + PCIE_BAR1);
		val = readl(port->base + PCIE_BAR0);
		writel(val | 0x4, port->base + PCIE_BAR0);
	}
#endif
	/*
	 * Set BAR0_REG to BAR0_ADDR to avoid config space overwritten.
	 * From PLDA spec: "PCI Express BAR0/1 is configured as 64-bit prefetchable memory space of 16 KBytes.
	 * PCIe read and write requests targeting BAR0/1 are routed to Bridge Configuration space."
	 */
	val = readl(port->base + PCIE_CFGNUM);
	writel((val & (~(0x1<<8))), port->base + PCIE_CFGNUM);
	writel(0x0, port->base + CFG_BAR1_REG);
	writel(BAR0_ADDR | 0xc, port->base + CFG_BAR0_REG);

	err = falcon_pcie_startup_port(port);
	if (err)
		goto err_phy_init;
	return 0;

err_phy_init:
	clk_disable_unprepare(port->axi_ck);
err_axi_clk:
	falcon_pcie_port_free(port);
	return -1;
}
static int falcon_pcie_parse_port(struct falcon_pcie *pcie,
struct device_node *node,
int slot)
{
struct falcon_pcie_port *port;
struct resource *regs;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
const __be32 *prop;
unsigned int proplen;
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pciephy");
port->phy_base = (void *)(regs->start);
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pciectrl");
port->base = devm_ioremap_resource(dev, regs);
if (IS_ERR(port->base)) {
dev_err(dev, "failed to map port%d base\n", slot);
return PTR_ERR(port->base);
}
port->axi_ck = of_clk_get_by_name(node, NULL);
if (IS_ERR(port->axi_ck)) {
dev_err(&pdev->dev, "Error %ld to get pcie clock\n",
PTR_ERR(port->axi_ck));
return PTR_ERR(port->axi_ck);
}
/* some platforms may use default PHY setting */
port->phy = devm_phy_optional_get(dev, "pcie-phy");
if (IS_ERR(port->phy))
return PTR_ERR(port->phy);
port->slot = slot;
port->pcie = pcie;
pcie->port = port;
mutex_unlock(&port->lock);
port->suspended = false;
prop = of_get_property(node, "lpm-qos", &proplen);
if (prop)
port->lpm_qos = be32_to_cpup(prop);
err = of_property_read_u32(node, "interrupts", &(port->irq));
if (err)
return err;
#ifndef CONFIG_PCI_MSI
port->irq += 16;
err = devm_request_irq(dev, port->irq, falcon_pcie_irq_handler,
IRQF_SHARED, "falcon-pcie", port);
if (err) {
dev_err(dev, "failed to request irq %d\n", port->irq);
return err;
}
#endif
port->gpio_reset = of_get_named_gpio(node, "reset-gpios", 0);
if (port->gpio_reset < 0)
return -ENODEV;
INIT_LIST_HEAD(&port->list);
return 0;
}
/* Enable runtime PM and hold a reference for the controller's lifetime. */
static int falcon_pcie_subsys_powerup(struct falcon_pcie *pcie)
{
	pm_runtime_enable(pcie->dev);
	pm_runtime_get_sync(pcie->dev);
	return 0;
}
/*
 * Parse DT bridge windows, map I/O space, create and power the single port,
 * then enable MSI when configured.
 * NOTE(review): the error_put_node label only returns err -- resources
 * claimed by earlier steps are not unwound on later failures.
 */
static int falcon_pcie_setup(struct falcon_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct list_head *windows = &host->windows;
	struct resource_entry *win, *tmp_win;
	resource_size_t io_base;
	int err;

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    windows, &io_base);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, windows);
	if (err < 0)
		return err;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp_win, windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, win->res, io_base);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, win->res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			memcpy(&pcie->mem, win->res, sizeof(*win->res));
			pcie->mem.name = "mem";
			pcie->busnr = 0;
			break;
		case IORESOURCE_BUS:
			pcie->busnr = win->res->start;
			break;
		}
	}

	err = falcon_pcie_parse_port(pcie, node, 0);
	if (err)
		goto error_put_node;

	err = falcon_pcie_subsys_powerup(pcie);
	if (err)
		return err;

	err = falcon_pcie_enable_port(pcie->port);
	if (err)
		return err;

#ifdef CONFIG_PCI_MSI
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = falcon_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto error_put_node;
		}
	}
#endif
	return 0;

error_put_node:
	return err;
}
static int falcon_pcie_suspend_noirq(struct device *dev);
static int falcon_pcie_resume_noirq(struct device *dev);
/*
 * sysfs "host_ctrl" write hook: a non-zero hex value asserts the reset GPIO
 * and resumes the controller; zero suspends it and deasserts the GPIO.
 */
static ssize_t host_ctrl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct falcon_pcie *pcie = dev_get_drvdata(dev);
	u32 on;

	if (sscanf(buf, "%x\n", &on) != 1)
		return -EINVAL;

	if (on) {
		dev_info(dev, "perst assert and resume pcie controller.\n");
		falcon_preset_assert(pcie->port);
		falcon_pcie_resume_noirq(dev);
	} else {
		dev_info(dev, "perst deassert and suspend pcie controller.\n");
		falcon_pcie_suspend_noirq(dev);
		falcon_preset_deassert(pcie->port);
	}

	return count;
}
/* Write-only "host_ctrl" attribute, registered on the platform device. */
static DEVICE_ATTR_WO(host_ctrl);

static struct attribute *falcon_pcie_rc_attrs[] = {
	&dev_attr_host_ctrl.attr,
	NULL
};

static struct attribute_group falcon_pcie_attrgroup = {
	.attrs = falcon_pcie_rc_attrs,
};
/*
 * Probe: allocate the bridge, expose the host_ctrl sysfs knob, bring up the
 * port, then enumerate the bus.
 *
 * Fixes vs. original: pci_host_probe()'s result is now checked before
 * pm_qos_update_request() touches port data; the sysfs group and the pm_qos
 * request are removed on the failure paths instead of leaking.
 */
static int falcon_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct falcon_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);
	pcie->dev = dev;
	pcie->soc = of_device_get_match_data(dev);

	/* add attributes for host_ctrl */
	err = sysfs_create_group(&pdev->dev.kobj, &falcon_pcie_attrgroup);
	if (err)
		return -ENODEV;

	err = falcon_pcie_setup(pcie);
	if (err) {
		sysfs_remove_group(&pdev->dev.kobj, &falcon_pcie_attrgroup);
		return -ENODEV;
	}

	platform_set_drvdata(pdev, pcie);
	host->busnr = pcie->busnr;
	host->dev.parent = pcie->dev;
	host->ops = pcie->soc->ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = pcie;
	pcie->host = host;
#ifdef CONFIG_PCI_MSI
	if (IS_ENABLED(CONFIG_PCI_MSI))
		host->msi = &pcie->msi.chip;
#endif
	pm_qos_add_request(&pcie->qos_idle, PM_QOS_CPUIDLE_BLOCK,
			   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
	pcie->qos_idle.name = pdev->name;
	device_init_wakeup(&pdev->dev, 1);

	err = pci_host_probe(host);
	if (err)
		goto put_resources;

	/* constrain idle states only once the bus is actually up */
	pm_qos_update_request(&pcie->qos_idle, pcie->port->lpm_qos);
	return 0;

put_resources:
	pm_qos_remove_request(&pcie->qos_idle);
	sysfs_remove_group(&pdev->dev.kobj, &falcon_pcie_attrgroup);
	falcon_pcie_put_resources(pcie);
	return err;
}
/* Release the bridge window list populated during setup. */
static void falcon_pcie_free_resources(struct falcon_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	pci_free_resource_list(&host->windows);
}
static int falcon_pcie_remove(struct platform_device *pdev)
{
struct falcon_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
pci_stop_root_bus(host->bus);
pci_remove_root_bus(host->bus);
falcon_pcie_free_resources(pcie);
falcon_pcie_put_resources(pcie);
return 0;
}
/*
 * noirq suspend: gate the AXI clock, power down the PHY, and lift the
 * cpuidle restriction. Config accessors check @suspended and bail out.
 * Idempotent: a second call while suspended is a no-op.
 */
static int __maybe_unused falcon_pcie_suspend_noirq(struct device *dev)
{
	struct falcon_pcie *pcie = dev_get_drvdata(dev);
	struct falcon_pcie_port *port = pcie->port;

	if (port->suspended) {
		return 0;
	}
	clk_disable_unprepare(port->axi_ck);
	phy_power_off(port->phy);
	phy_exit(port->phy);
	pm_qos_update_request(&pcie->qos_idle,
			      PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
	port->suspended = true;
	return 0;
}
/*
 * noirq resume: restore the cpuidle constraint and re-run the full port
 * bring-up. NOTE(review): falcon_pcie_enable_port()'s return value is
 * ignored, and on its failure path it frees the port structure while
 * pcie->port keeps pointing at it.
 */
static int __maybe_unused falcon_pcie_resume_noirq(struct device *dev)
{
	struct falcon_pcie *pcie = dev_get_drvdata(dev);
	struct falcon_pcie_port *port = pcie->port;

	if (!port->suspended) {
		return 0;
	}
	pm_qos_update_request(&pcie->qos_idle, port->lpm_qos);
	falcon_pcie_enable_port(port);
	port->suspended = false;
	return 0;
}
/* System sleep handled entirely in the noirq phase. */
static const struct dev_pm_ops falcon_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(falcon_pcie_suspend_noirq,
				      falcon_pcie_resume_noirq)
};
/* Default SoC data: standard config-space ops. */
static const struct falcon_pcie_soc falcon_pcie_soc = {
	.ops = &falcon_pcie_ops,
};
/* DT match table. NOTE(review): no MODULE_DEVICE_TABLE, so no module autoload. */
static const struct of_device_id falcon_pcie_ids[] = {
	{ .compatible = "asr,falcon-pcie", .data = &falcon_pcie_soc },
	{},
};
/* suppress_bind_attrs: manual unbind via sysfs is not supported. */
static struct platform_driver falcon_pcie_driver = {
	.probe = falcon_pcie_probe,
	.remove = falcon_pcie_remove,
	.driver = {
		.name = "pcie-falcon",
		.of_match_table = falcon_pcie_ids,
		.suppress_bind_attrs = true,
		.pm = &falcon_pcie_pm_ops,
	},
};
/* Falcon PCIe driver does not allow module unload */
static int __init falcon_pcie_init(void)
{
	/* platform_driver_probe(): bind once at init; devices must already exist */
	return platform_driver_probe(&falcon_pcie_driver, falcon_pcie_probe);
}
device_initcall_sync(falcon_pcie_init);

MODULE_DESCRIPTION("ASR Falcon PCIe Driver");
MODULE_LICENSE("GPL v2");