// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;
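/*
 * Both pci_ops tables are defined towards the end of this file; the
 * forward declarations let dw_pcie_host_init() install them as the
 * default bus ops and let dw_pcie_setup_rc() compare the bridge's
 * child_ops against dw_child_pcie_ops before the definitions appear.
 */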
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
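/*
 * Top-level MSI irq_chip: each operation is forwarded both to the PCI
 * MSI layer and to the parent (bottom) chip of the hierarchical domain.
 */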
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
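/*
 * The integrated iMSI-RX logic exposes up to MAX_MSI_CTRLS controllers,
 * each serving MAX_MSI_IRQS_PER_CTRL vectors through its own set of
 * STATUS/ENABLE/MASK registers spaced MSI_REG_CTRL_BLOCK_SIZE apart.
 */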
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}
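/*
 * dw_handle_msi_irq() is also the entry point for glue drivers that
 * register their own top-level MSI handler instead of the chained flow
 * below. A minimal sketch (foo_pcie_msi_handler() is a hypothetical
 * name, not part of this driver):
 *
 *	static irqreturn_t foo_pcie_msi_handler(int irq, void *arg)
 *	{
 *		struct dw_pcie_rp *pp = arg;
 *
 *		return dw_handle_msi_irq(pp);
 *	}
 */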
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}
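/*
 * All vectors are funnelled through a single parent interrupt per
 * controller, so per-vector affinity cannot be honoured; the effective
 * affinity is that of the parent IRQ.
 */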
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}
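/*
 * Bottom irq_chip of the hierarchy: its callbacks program the iMSI-RX
 * MASK/STATUS registers directly through the DBI interface.
 */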
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
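/*
 * Two domains are stacked: an inner linear "nexus" domain that hands out
 * hwirqs backed by the msi_irq_in_use bitmap, and a PCI MSI domain on
 * top of it that endpoint drivers actually allocate vectors from.
 */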
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}
static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
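/*
 * Two IRQ wiring schemes are supported: a single "msi" interrupt shared
 * by all controllers, or one "msiN" interrupt per controller. The helper
 * below probes for the split scheme first.
 */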
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}

	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}
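/*
 * With the split scheme a board would describe its vectors along these
 * lines (illustrative devicetree snippet, not tied to any one binding):
 *
 *	interrupt-names = "msi0", "msi1", "msi2";
 *
 * which yields 3 * MAX_MSI_IRQS_PER_CTRL usable vectors.
 */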
static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note until there is a better alternative found the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr) {
		dev_err(dev, "Failed to alloc and map MSI data\n");
		dw_pcie_free_msi(pp);
		return -ENOMEM;
	}

	return 0;
}
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in the dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->host_deinit)
		pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
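/*
 * Config accesses to devices below the root bus are tunnelled through
 * the shared "config" window: map_bus() retargets outbound ATU region 0
 * at the addressed device, then the generic accessors read or write
 * through va_cfg0_base.
 */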
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
					pp->cfg0_size);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}
static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
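/*
 * Outbound window assignment: region 0 is reserved for config accesses
 * (and doubles as the IO window when the iATU runs out of regions, see
 * cfg0_io_shared above); MEM ranges are programmed from region 1 up.
 */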
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
						entry->res->start,
						entry->res->start - entry->offset,
						resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
							pp->io_base,
							pp->io_bus_addr,
							pp->io_size);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret = 0;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (!pci->pp.ops->pme_turn_off)
		return 0;

	pci->pp.ops->pme_turn_off(&pci->pp);

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->host_deinit)
		pci->pp.ops->host_deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
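/*
 * Resume mirrors the suspend path: re-run the platform's host_init,
 * reprogram the Root Complex via dw_pcie_setup_rc() and bring the link
 * back up before normal config/MMIO traffic restarts.
 */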
int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret = 0;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->host_init) {
		ret = pci->pp.ops->host_init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);