--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * pci-j721e - PCIe controller driver for TI's J721E SoCs
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "../../pci.h"
+#include "pcie-cadence.h"
+
+#define ENABLE_REG_SYS_2 0x108
+#define STATUS_REG_SYS_2 0x508
+#define STATUS_CLR_REG_SYS_2 0x708
+#define LINK_DOWN BIT(1)
+
+#define J721E_PCIE_USER_CMD_STATUS 0x4
+#define LINK_TRAINING_ENABLE BIT(0)
+
+#define J721E_PCIE_USER_LINKSTATUS 0x14
+#define LINK_STATUS GENMASK(1, 0)
+
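+/* Encodings of the LINK_STATUS field in J721E_PCIE_USER_LINKSTATUS */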
+enum link_status {
+ NO_RECEIVERS_DETECTED,
+ LINK_TRAINING_IN_PROGRESS,
+ LINK_UP_DL_IN_PROGRESS,
+ LINK_UP_DL_COMPLETED,
+};
+
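+/* Fields of the syscon PCIe control register referenced by "ti,syscon-pcie-ctrl" */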
+#define J721E_MODE_RC BIT(7)
+#define LANE_COUNT_MASK BIT(8)
+#define LANE_COUNT(n) ((n) << 8)
+
+#define GENERATION_SEL_MASK GENMASK(1, 0)
+
+#define MAX_LANES 2
+
+struct j721e_pcie {
+ struct device *dev;
+ u32 mode;
+ u32 num_lanes;
+ struct cdns_pcie *cdns_pcie;
+ void __iomem *user_cfg_base;
+ void __iomem *intd_cfg_base;
+};
+
+enum j721e_pcie_mode {
+ PCI_MODE_RC,
+ PCI_MODE_EP,
+};
+
+struct j721e_pcie_data {
+ enum j721e_pcie_mode mode;
+};
+
+static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
+{
+ return readl(pcie->user_cfg_base + offset);
+}
+
+static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->user_cfg_base + offset);
+}
+
+static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset)
+{
+ return readl(pcie->intd_cfg_base + offset);
+}
+
+static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->intd_cfg_base + offset);
+}
+
+static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
+{
+ struct j721e_pcie *pcie = priv;
+ struct device *dev = pcie->dev;
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
+ if (!(reg & LINK_DOWN))
+ return IRQ_NONE;
+
+ dev_err(dev, "LINK DOWN!\n");
+
+ j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN);
+ return IRQ_HANDLED;
+}
+
+static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2);
+ reg |= LINK_DOWN;
+ j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg);
+}
+
+static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 reg;
+
+ reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
+ reg |= LINK_TRAINING_ENABLE;
+ j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
+
+ return 0;
+}
+
+static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 reg;
+
+ reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS);
+ reg &= ~LINK_TRAINING_ENABLE;
+ j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg);
+}
+
+static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 reg;
+
+ reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
+ reg &= LINK_STATUS;
+ if (reg == LINK_UP_DL_COMPLETED)
+ return true;
+
+ return false;
+}
+
+static const struct cdns_pcie_ops j721e_pcie_ops = {
+ .start_link = j721e_pcie_start_link,
+ .stop_link = j721e_pcie_stop_link,
+ .link_up = j721e_pcie_link_up,
+};
+
+static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon)
+{
+ struct device *dev = pcie->dev;
+ u32 mask = J721E_MODE_RC;
+ u32 mode = pcie->mode;
+ u32 val = 0;
+ int ret = 0;
+
+ if (mode == PCI_MODE_RC)
+ val = J721E_MODE_RC;
+
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret)
+ dev_err(dev, "failed to set pcie mode\n");
+
+ return ret;
+}
+
+static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+ int link_speed;
+ u32 val = 0;
+ int ret;
+
+ link_speed = of_pci_get_max_link_speed(np);
+ if (link_speed < 2)
+ link_speed = 2;
+
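+ /* GENERATION_SEL encodes the PCIe generation as (link speed - 1) */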
+ val = link_speed - 1;
+ ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val);
+ if (ret)
+ dev_err(dev, "failed to set link speed\n");
+
+ return ret;
+}
+
+static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->dev;
+ u32 lanes = pcie->num_lanes;
+ u32 val = 0;
+ int ret;
+
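+ /* The LANE_COUNT field encodes the number of lanes minus one */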
+ val = LANE_COUNT(lanes - 1);
+ ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val);
+ if (ret)
+ dev_err(dev, "failed to set link count\n");
+
+ return ret;
+}
+
+static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl");
+ if (IS_ERR(syscon)) {
+ dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n");
+ return PTR_ERR(syscon);
+ }
+
+ ret = j721e_pcie_set_mode(pcie, syscon);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set pci mode\n");
+ return ret;
+ }
+
+ ret = j721e_pcie_set_link_speed(pcie, syscon);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set link speed\n");
+ return ret;
+ }
+
+ ret = j721e_pcie_set_lane_count(pcie, syscon);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set num-lanes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
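+/*
+ * Root port configuration space registers are accessed only through
+ * the 32-bit generic accessors; downstream devices use the standard
+ * generic accessors.
+ */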
+static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (pci_is_root_bus(bus))
+ return pci_generic_config_read32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (pci_is_root_bus(bus))
+ return pci_generic_config_write32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static struct pci_ops cdns_ti_pcie_host_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = cdns_ti_pcie_config_read,
+ .write = cdns_ti_pcie_config_write,
+};
+
+static const struct j721e_pcie_data j721e_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+};
+
+static const struct j721e_pcie_data j721e_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+};
+
+static const struct of_device_id of_j721e_pcie_match[] = {
+ {
+ .compatible = "ti,j721e-pcie-host",
+ .data = &j721e_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j721e-pcie-ep",
+ .data = &j721e_pcie_ep_data,
+ },
+ {},
+};
+
+static int j721e_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct j721e_pcie_data *data;
+ struct cdns_pcie *cdns_pcie;
+ struct j721e_pcie *pcie;
+ struct cdns_pcie_rc *rc;
+ struct cdns_pcie_ep *ep;
+ struct gpio_desc *gpiod;
+ void __iomem *base;
+ u32 num_lanes;
+ u32 mode;
+ int ret;
+ int irq;
+
+ data = (struct j721e_pcie_data *)of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ mode = (u32)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = dev;
+ pcie->mode = mode;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ pcie->intd_cfg_base = base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "user_cfg");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ pcie->user_cfg_base = base;
+
+ ret = of_property_read_u32(node, "num-lanes", &num_lanes);
+ if (ret || num_lanes > MAX_LANES)
+ num_lanes = 1;
+ pcie->num_lanes = num_lanes;
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
+ return -EINVAL;
+
+ irq = platform_get_irq_byname(pdev, "link_state");
+ if (irq < 0)
+ return irq;
+
+ dev_set_drvdata(dev, pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
+
+ ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
+ "j721e-pcie-link-down-irq", pcie);
+ if (ret < 0) {
+ dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ goto err_get_sync;
+ }
+
+ j721e_pcie_config_link_irq(pcie);
+
+ switch (mode) {
+ case PCI_MODE_RC:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto err_get_sync;
+ }
+
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+
+ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_init_phy(dev, cdns_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to init phy\n");
+ goto err_get_sync;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
+ * indicates PERST# should be deasserted after a minimum of 100us
+ * once REFCLK is stable. The REFCLK to the connector in RC
+ * mode is selected while enabling the PHY. So deassert PERST#
+ * after 100 us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret < 0)
+ goto err_pcie_setup;
+
+ break;
+ case PCI_MODE_EP:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ ret = -ENOMEM;
+ goto err_get_sync;
+ }
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+
+ ret = cdns_pcie_init_phy(dev, cdns_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to init phy\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_ep_setup(ep);
+ if (ret < 0)
+ goto err_pcie_setup;
+
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+
+ return 0;
+
+err_pcie_setup:
+ cdns_pcie_disable_phy(cdns_pcie);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int j721e_pcie_remove(struct platform_device *pdev)
+{
+ struct j721e_pcie *pcie = platform_get_drvdata(pdev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ struct device *dev = &pdev->dev;
+
+ cdns_pcie_disable_phy(cdns_pcie);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static struct platform_driver j721e_pcie_driver = {
+ .probe = j721e_pcie_probe,
+ .remove = j721e_pcie_remove,
+ .driver = {
+ .name = "j721e-pcie",
+ .of_match_table = of_j721e_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(j721e_pcie_driver);
return 0;
}
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn,
+ u16 interrupt_num)
+{
+ u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
+ u32 tbl_offset, msg_data, reg;
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct cdns_pcie_epf *epf;
+ u64 pci_addr_mask = 0xff;
+ u64 msg_addr;
+ u16 flags;
+ u8 bir;
+
+ /* Check whether the MSI-X feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
+ if (!(flags & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ reg = cap + PCI_MSIX_TABLE;
+ tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
+ bir = tbl_offset & PCI_MSIX_TABLE_BIR;
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
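+ /* Locate the MSI-X table in the BAR selected by the table offset/BIR register */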
+ epf = &ep->epf[fn];
+ msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+
+ /* Set the outbound region if needed. */
+ if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn) {
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
+ false,
+ ep->irq_phys_addr,
+ msg_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+ writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));
+
+ return 0;
+}
+
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
enum pci_epc_irq_type type,
u16 interrupt_num)
return 0;
}
+static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr, u64 size,
+ unsigned long flags)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 addr0, addr1, aperture, value;
+
+ if (!rc->avail_ib_bar[bar])
+ return -EBUSY;
+
+ rc->avail_ib_bar[bar] = false;
+
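+ /* The BAR aperture is encoded as log2 of the inbound window size */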
+ aperture = ilog2(size);
+ addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);
+
+ if (bar == RP_NO_BAR)
+ return 0;
+
+ value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
+ value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ if (size + cpu_addr >= SZ_4G) {
+ if (!(flags & IORESOURCE_PREFETCH))
+ value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
+ value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
+ } else {
+ if (!(flags & IORESOURCE_PREFETCH))
+ value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
+ value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
+ }
+
+ value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+
+ return 0;
+}
+
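+/*
+ * Find the free inbound BAR with the smallest aperture that is still
+ * large enough to hold @size.
+ */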
+static enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size <= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] < bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+
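+/*
+ * Find the free inbound BAR with the largest aperture not exceeding
+ * @size, used when @size has to be split across multiple BARs.
+ */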
+static enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size >= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] > bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+
+static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+ struct resource_entry *entry)
+{
+ u64 cpu_addr, pci_addr, size, winsize;
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ enum cdns_pcie_rp_bar bar;
+ unsigned long flags;
+ int ret;
+
+ cpu_addr = entry->res->start;
+ pci_addr = entry->res->start - entry->offset;
+ flags = entry->res->flags;
+ size = resource_size(entry->res);
+
+ if (entry->offset) {
+ dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
+ pci_addr, cpu_addr);
+ return -EINVAL;
+ }
+
+ while (size > 0) {
+ /*
+ * Try to find a minimum BAR whose size is greater than
+ * or equal to the remaining resource_entry size. This will
+ * fail if the size of each of the available BARs is less than
+ * the remaining resource_entry size.
+ * If such a BAR is found, the inbound ATU is configured for it
+ * and we return.
+ */
+ bar = cdns_pcie_host_find_min_bar(rc, size);
+ if (bar != RP_BAR_UNDEFINED) {
+ ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
+ size, flags);
+ if (ret)
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ /*
+ * Reaching here means the remaining resource_entry size cannot
+ * fit in a single BAR. So we
+ * find a maximum BAR whose size is less than or equal to the
+ * remaining resource_entry size and split the resource entry
+ * so that part of resource entry is fitted inside the maximum
+ * BAR. The remaining size would be fitted during the next
+ * iteration of the loop.
+ * If a maximum BAR is not found, there is no way we can fit
+ * this resource_entry, so we error out.
+ */
+ bar = cdns_pcie_host_find_max_bar(rc, size);
+ if (bar == RP_BAR_UNDEFINED) {
+ dev_err(dev, "No free BAR to map cpu_addr %llx\n",
+ cpu_addr);
+ return -EINVAL;
+ }
+
+ winsize = bar_max_size[bar];
+ ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
+ flags);
+ if (ret) {
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ size -= winsize;
+ cpu_addr += winsize;
+ }
+
+ return 0;
+}
+
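+/* Sort dma-ranges entries by decreasing size so the largest ranges claim BARs first */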
+static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct resource_entry *entry1, *entry2;
+
+ entry1 = container_of(a, struct resource_entry, node);
+ entry2 = container_of(b, struct resource_entry, node);
+
+ return resource_size(entry2->res) - resource_size(entry1->res);
+}
+
+static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+ u32 no_bar_nbits = 32;
+ int err;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
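+ /* Without dma-ranges, open a single 2^no_bar_nbits inbound window via RP_NO_BAR */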
+ if (list_empty(&bridge->dma_ranges)) {
+ of_property_read_u32(np, "cdns,no-bar-match-nbits",
+ &no_bar_nbits);
+ err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
+ (u64)1 << no_bar_nbits, 0);
+ if (err)
+ dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
+ return err;
+ }
+
+ list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = cdns_pcie_host_bar_config(rc, entry);
+ if (err) {
+ dev_err(dev, "Failed to configure IB using dma-ranges\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
- struct resource *bus_range = rc->bus_range;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
- struct resource *mem_res = pcie->mem_res;
struct resource *cfg_res = rc->cfg_res;
- struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node;
- struct of_pci_range_parser parser;
+ struct resource_entry *entry;
+ u64 cpu_addr = cfg_res->start;
- struct of_pci_range range;
u32 addr0, addr1, desc1;
- u64 cpu_addr;
- int r, err;
+ int r, err, busnr = 0;
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
/*
* Reserve region 0 for PCI configure space accesses:
}
static int cdns_pcie_host_init(struct device *dev,
- struct list_head *resources,
struct cdns_pcie_rc *rc)
{
- struct resource *bus_range = NULL;
- struct pci_host_bridge *bridge;
int err;
- bridge = pci_host_bridge_from_priv(rc);
- if (!bridge)
- return -ENOMEM;
-
- /* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources,
- &bridge->dma_ranges, &bus_range);
- if (err)
- return err;
-
- rc->bus_range = bus_range;
- rc->pcie.bus = bus_range->start;
-
err = cdns_pcie_host_init_root_port(rc);
if (err)
- goto err_out;
-
- err = cdns_pcie_host_init_address_translation(rc);
- if (err)
- goto err_out;
-
- return 0;
+ return err;
- err_out:
- pci_free_resource_list(resources);
- return err;
+ return cdns_pcie_host_init_address_translation(rc);
}
+static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (cdns_pcie_link_up(pcie)) {
+ dev_info(dev, "Link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
struct device *dev = rc->pcie.dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
- struct list_head resources;
+ enum cdns_pcie_rp_bar bar;
struct cdns_pcie *pcie;
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(rc->cfg_base)) {
- dev_err(dev, "missing \"cfg\"\n");
+ if (IS_ERR(rc->cfg_base))
return PTR_ERR(rc->cfg_base);
- }
rc->cfg_res = res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
- if (!res) {
- dev_err(dev, "missing \"mem\"\n");
- return -EINVAL;
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
}
- pcie->mem_res = res;
+ ret = cdns_pcie_host_wait_for_link(pcie);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, &resources, rc);
+ ret = cdns_pcie_host_init(dev, rc);
if (ret)
- goto err_init;
+ return ret;
- list_splice_init(&resources, &bridge->windows);
- bridge->dev.parent = dev;
- bridge->busnr = pcie->bus;
- bridge->ops = &cdns_pcie_host_ops;
+ if (!bridge->ops)
+ bridge->ops = &cdns_pcie_host_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
ret = pci_host_probe(bridge);
if (ret < 0)
* @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
* @cfg_base: IO mapped window to access the PCI configuration space of a
* single function at a time
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- * translation (nbits sets into the "no BAR match" register)
* @vendor_id: PCI vendor ID
* @device_id: PCI device ID
+ * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR, indicating
+ * whether each inbound BAR is still free/available
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
struct resource *cfg_res;
- struct resource *bus_range;
void __iomem *cfg_base;
- u32 no_bar_nbits;
u32 vendor_id;
u32 device_id;
+ bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+};
+
+/**
+ * struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
+ */
+struct cdns_pcie_epf {
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
/**
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
int devfn)
{
- if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
return false;
- if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
+ /*
+ * If the link goes down after we check for link-up, nothing bad
+ * happens but the config access times out.
+ */
+ if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
+ return false;
+
return true;
}
err_pm_put:
pm_runtime_put(dev);
-
-err_pm_disable:
pm_runtime_disable(dev);
- pci_free_resource_list(&host->resources);
-
- err_free_bridge:
- pci_free_host_bridge(bridge);
return err;
}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
+ *
+ * (C) Copyright 2019 - 2020, Xilinx, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pci-ecam.h>
+
+#include "../pci.h"
+
+/* Register definitions */
+#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
+#define XILINX_CPM_PCIE_REG_IMR 0x00000E14
+#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C
+#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20
+#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C
+#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38
+#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
+#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
+#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
+#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+
+/* Interrupt registers definitions */
+#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
+#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
+#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4
+#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8
+#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9
+#define XILINX_CPM_PCIE_INTR_NONFATAL 10
+#define XILINX_CPM_PCIE_INTR_FATAL 11
+#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12
+#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15
+#define XILINX_CPM_PCIE_INTR_INTX 16
+#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17
+#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20
+#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21
+#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22
+#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23
+#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24
+#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25
+#define XILINX_CPM_PCIE_INTR_MST_DECERR 26
+#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27
+#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28
+
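+/* Build an interrupt mask bit from an interrupt cause number */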
+#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)
+
+#define XILINX_CPM_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(LINK_DOWN) | \
+ IMR(HOT_RESET) | \
+ IMR(CFG_PCIE_TIMEOUT) | \
+ IMR(CFG_TIMEOUT) | \
+ IMR(CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ IMR(CFG_ERR_POISON) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(INTX) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(SLV_UNSUPP) | \
+ IMR(SLV_UNEXP) | \
+ IMR(SLV_COMPL) | \
+ IMR(SLV_ERRP) | \
+ IMR(SLV_CMPABT) | \
+ IMR(SLV_ILLBUR) | \
+ IMR(MST_DECERR) | \
+ IMR(MST_SLVERR) | \
+ IMR(SLV_PCIE_TIMEOUT) \
+ )
+
+#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16)
+#define XILINX_CPM_PCIE_IDRN_SHIFT 16
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Status/control Register definitions */
+#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/**
+ * struct xilinx_cpm_pcie_port - PCIe port information
+ * @reg_base: Bridge Register Base
+ * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
+ * @dev: Device pointer
+ * @intx_domain: Legacy IRQ domain pointer
+ * @cpm_domain: CPM IRQ domain pointer
+ * @cfg: Holds mappings of config space window
+ * @intx_irq: legacy interrupt number
+ * @irq: Error interrupt number
+ * @lock: lock protecting shared register access
+ */
+struct xilinx_cpm_pcie_port {
+ void __iomem *reg_base;
+ void __iomem *cpm_base;
+ struct device *dev;
+ struct irq_domain *intx_domain;
+ struct irq_domain *cpm_domain;
+ struct pci_config_window *cfg;
+ int intx_irq;
+ int irq;
+ raw_spinlock_t lock;
+};
+
+static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
+{
+ return readl_relaxed(port->reg_base + reg);
+}
+
+static void pcie_write(struct xilinx_cpm_pcie_port *port,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, port->reg_base + reg);
+}
+
+static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
+{
+ return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
+ XILINX_CPM_PCIE_REG_PSCR_LNKUP);
+}
+
+static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
+{
+ unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
+
+ if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_RPEFR);
+ }
+}
+
+static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
+{
+ struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
+ pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
+{
+ struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
+ pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip xilinx_cpm_leg_irq_chip = {
+ .name = "INTx",
+ .irq_mask = xilinx_cpm_mask_leg_irq,
+ .irq_unmask = xilinx_cpm_unmask_leg_irq,
+};
+
+/**
+ * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_cpm_pcie_intx_map,
+};
+
+static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
+{
+ struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long val;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
+ pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));
+
+ for_each_set_bit(i, &val, PCI_NUM_INTX)
+ generic_handle_irq(irq_find_mapping(port->intx_domain, i));
+
+ chained_irq_exit(chip, desc);
+}
+
+static void xilinx_cpm_mask_event_irq(struct irq_data *d)
+{
+ struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+ val &= ~BIT(d->hwirq);
+ pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
+{
+ struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+ val |= BIT(d->hwirq);
+ pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip xilinx_cpm_event_irq_chip = {
+ .name = "RC-Event",
+ .irq_mask = xilinx_cpm_mask_event_irq,
+ .irq_unmask = xilinx_cpm_unmask_event_irq,
+};
+
+static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = xilinx_cpm_pcie_event_map,
+};
+
+static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
+{
+ struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long val;
+ int i;
+
+ chained_irq_enter(chip, desc);
+ val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
+ val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_irq(irq_find_mapping(port->cpm_domain, i));
+ pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
+
+ /*
+ * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
+ * CPM SLCR block.
+ */
+ val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
+ if (val)
+ writel_relaxed(val,
+ port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
+
+ chained_irq_exit(chip, desc);
+}
+
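+/* Map an interrupt cause number to its symbol and a human-readable description */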
+#define _IC(x, s) \
+ [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(LINK_DOWN, "Link Down"),
+ _IC(HOT_RESET, "Hot reset"),
+ _IC(CFG_TIMEOUT, "ECAM access timeout"),
+ _IC(CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+ _IC(SLV_UNSUPP, "Slave unsupported request"),
+ _IC(SLV_UNEXP, "Slave unexpected completion"),
+ _IC(SLV_COMPL, "Slave completion timeout"),
+ _IC(SLV_ERRP, "Slave Error Poison"),
+ _IC(SLV_CMPABT, "Slave Completer Abort"),
+ _IC(SLV_ILLBUR, "Slave Illegal Burst"),
+ _IC(MST_DECERR, "Master decode error"),
+ _IC(MST_SLVERR, "Master slave error"),
+ _IC(CFG_PCIE_TIMEOUT, "PCIe ECAM access timeout"),
+ _IC(CFG_ERR_POISON, "ECAM poisoned completion received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(SLV_PCIE_TIMEOUT, "PCIe completion timeout received"),
+};
+
+static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
+{
+ struct xilinx_cpm_pcie_port *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *d;
+
+ d = irq_domain_get_irq_data(port->cpm_domain, irq);
+
+ switch (d->hwirq) {
+ case XILINX_CPM_PCIE_INTR_CORRECTABLE:
+ case XILINX_CPM_PCIE_INTR_NONFATAL:
+ case XILINX_CPM_PCIE_INTR_FATAL:
+ cpm_pcie_clear_err_interrupts(port);
+ fallthrough;
+
+ default:
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
+{
+ if (port->intx_domain) {
+ irq_domain_remove(port->intx_domain);
+ port->intx_domain = NULL;
+ }
+
+ if (port->cpm_domain) {
+ irq_domain_remove(port->cpm_domain);
+ port->cpm_domain = NULL;
+ }
+}
+
+/**
+ * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -EINVAL;
+ }
+
+ port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
+ &event_domain_ops,
+ port);
+ if (!port->cpm_domain)
+ goto out;
+
+ irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops,
+ port);
+ if (!port->intx_domain)
+ goto out;
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return 0;
+out:
+ xilinx_cpm_free_irq_domains(port);
+ dev_err(dev, "Failed to allocate IRQ domains\n");
+
+ return -ENOMEM;
+}
+
+static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, irq;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ int err;
+
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(port->cpm_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
+ 0, intr_cause[i].sym, port);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+ return err;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->cpm_domain,
+ XILINX_CPM_PCIE_INTR_INTX);
+ if (!port->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq,
+ xilinx_cpm_pcie_intx_flow, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq,
+ xilinx_cpm_pcie_event_flow, port);
+
+ return 0;
+}
+
+/**
+ * xilinx_cpm_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
+{
+ if (cpm_pcie_link_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
+ XILINX_CPM_PCIE_IMR_ALL_MASK,
+ XILINX_CPM_PCIE_REG_IDR);
+
+ /*
+ * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
+ * CPM SLCR block.
+ */
+ writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
+ XILINX_CPM_PCIE_REG_RPSC_BEN,
+ XILINX_CPM_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_cpm_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ * @bus_range: Bus resource
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
+ struct resource *bus_range)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
+ "cpm_slcr");
+ if (IS_ERR(port->cpm_base))
+ return PTR_ERR(port->cpm_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!res)
+ return -ENXIO;
+
+ port->cfg = pci_ecam_create(dev, res, bus_range,
+ &pci_generic_ecam_ops);
+ if (IS_ERR(port->cfg))
+ return PTR_ERR(port->cfg);
+
+ port->reg_base = port->cfg->win;
+
+ return 0;
+}
+
+static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
+{
+ irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+}
+
+/**
+ * xilinx_cpm_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_cpm_pcie_port *port;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *bus;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ err = xilinx_cpm_pcie_parse_dt(port, bus->res);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ goto err_parse_dt;
+ }
+
+ xilinx_cpm_pcie_init_port(port);
+
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = port->cfg;
+ bridge->busnr = port->cfg->busr.start;
+ bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_host_probe(bridge);
+ if (err < 0)
+ goto err_host_bridge;
+
+ return 0;
+
+err_host_bridge:
+ xilinx_cpm_free_interrupts(port);
+err_setup_irq:
+ pci_ecam_free(port->cfg);
+err_parse_dt:
+ xilinx_cpm_free_irq_domains(port);
+ return err;
+}
+
+static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
+ { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {}
+};
+
+static struct platform_driver xilinx_cpm_pcie_driver = {
+ .driver = {
+ .name = "xilinx-cpm-pcie",
+ .of_match_table = xilinx_cpm_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_cpm_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_cpm_pcie_driver);