// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
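
/*
 * Note: "own" config accesses above target the host bridge's own registers
 * through the DBI space; accesses to devices behind the bridge go through
 * dw_pcie_rd/wr_other_conf() further down. The pp->ops hooks let SoC glue
 * drivers override either path when their register access is non-standard.
 */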

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
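
/*
 * MSI handling uses a hierarchical IRQ domain: dw_pci_msi_bottom_irq_chip
 * (defined below) programs the DesignWare MSI controller registers, while
 * dw_pcie_msi_irq_chip above stacks on top of it and forwards ack, mask and
 * unmask to the parent domain via the irq_chip_*_parent() helpers.
 */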

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
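
/*
 * The MSI controller is banked: each control block serves
 * MAX_MSI_IRQS_PER_CTRL (32) vectors, with its registers spaced
 * MSI_REG_CTRL_BLOCK_SIZE apart. For example, hwirq 37 belongs to control
 * block 1 (37 / 32) and is reported in status bit 5 (37 % 32).
 */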

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
			    pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
}
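
/*
 * The mask state is cached in pp->irq_mask[] and updated under pp->lock so
 * the read-modify-write of PCIE_MSI_INTR0_MASK stays consistent. The status
 * register is write-one-to-clear, so the ack path can write BIT(bit)
 * directly without locking.
 */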

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
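
/*
 * order_base_2(nr_irqs) rounds each request up to a power of two, matching
 * the Multi-MSI requirement that a function's vectors be contiguous and
 * naturally aligned so the device can derive them all from one base
 * message data value.
 */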

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
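
/*
 * The page allocated above only pins down a bus address for the MSI
 * target: inbound writes to PCIE_MSI_ADDR are decoded by the controller
 * itself and raise the MSI interrupt instead of hitting memory, so the
 * page contents are never read.
 */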

int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
						    &bridge->windows,
						    &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				ret = -EINVAL;
				goto error;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				goto error;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							dw_chained_msi_isr,
							pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}
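
/*
 * Summary of the init sequence above: map the DBI and config windows from
 * DT resources, set up MSI (either the built-in controller or the SoC's
 * msi_host_init hook), run the platform's host_init, then hand the bridge
 * to the PCI core for scanning and resource assignment.
 */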

static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
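
/*
 * Type 0 config cycles (CFG0) address the bus immediately below the root
 * port; type 1 (CFG1) everything deeper. Outbound ATU region 1 is borrowed
 * for the config window, and with only two viewports it is shared with the
 * I/O window, hence the reprogramming back to PCIE_ATU_TYPE_IO after each
 * access.
 */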

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}
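
/*
 * Rejecting config accesses while the link is down avoids bus errors (or
 * hangs, on some SoCs) from cycles that can never complete; limiting the
 * root bus to device 0 reflects that the root port is the only device on
 * its own bus.
 */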

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
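
/*
 * Cores configured with the "unrolled" iATU expose each translation region
 * at its own fixed offset and omit the viewport register entirely, so a
 * read of PCIE_ATU_VIEWPORT returning all ones is taken as evidence of the
 * unrolled layout.
 */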

void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		pp->irq_mask[ctrl] = ~0;
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, pp->irq_mask[ctrl]);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, ~0);
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		if (pci->iatu_unroll_enabled && !pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
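
/*
 * Setting PORT_LOGIC_SPEED_CHANGE requests a directed speed change once
 * the link is up, letting the core train beyond Gen1 without an explicit
 * software-initiated retrain.
 */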