/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>

#include "powernv.h"
#include "pci.h"
#define define_pe_printk_level(func, kern_level)		\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
	char pfix[32];						\
	int r;							\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	if (pe->pdev)						\
		strlcpy(pfix, dev_name(&pe->pdev->dev),		\
			sizeof(pfix));				\
	else							\
		sprintf(pfix, "%04x:%02x ",			\
			pci_domain_nr(pe->pbus),		\
			pe->pbus->number);			\
								\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",		\
		   pfix, pe->pe_number, &vaf);			\
								\
	va_end(args);						\
								\
	return r;						\
}

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
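/*
 * Usage sketch (illustrative only, values assumed): with a PE bound to
 * bus 0002:01, a call such as
 *
 *	pe_info(pe, "Assigned %d DMA32 segments\n", segs);
 *
 * prints something like "pci 0002:01 : [PE# 002] Assigned 4 DMA32
 * segments"; the prefix is the device name for device-based PEs or the
 * domain:bus pair for bus-based PEs, per the format string above.
 */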
static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(dev);
	if (!np)
		return NULL;
	return PCI_DN(np);
}
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while(test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;

	return pe;
}
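/*
 * Note on concurrency (added remark): the allocator above is lock-free.
 * find_next_zero_bit() only proposes a candidate PE#; test_and_set_bit()
 * atomically claims it, and the loop retries if another CPU raced us to
 * the same bit. A minimal caller sketch, for illustration:
 *
 *	int pe_num = pnv_ioda_alloc_pe(phb);
 *
 *	if (pe_num == IODA_INVALID_PE)
 *		return;			(all PE numbers are in use)
 *	...
 *	pnv_ioda_free_pe(phb, pe_num);	(undo on the error path)
 */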
static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}
/* Currently these two are only used when MSIs are enabled; this will
 * change, but in the meantime we need to protect them to avoid
 * unused-function warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch(count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate busses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Associate PE in PELT */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}
	/* Setup reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Setup one MVT on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}
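/*
 * Worked example (assumed values, for illustration): a PE of type
 * PNV_IODA_PE_BUS_ALL covering buses 4..7 gives count = 4, hence
 * bcomp = OpalPciBus6Bits, i.e. only the top 6 bits of the bus number
 * are compared and RIDs 0x0400..0x07ff all match this PE. Consistently,
 * rid_end = pe->rid + (4 << 8) = 0x0800, so the reverse-map loop above
 * fills exactly those 1024 RID slots.
 */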
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}
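/*
 * Note (added remark): the DMA list is kept sorted by descending
 * dma_weight. Inserting in front of the first lighter entry preserves
 * that order, so the segment distribution loop in pnv_ioda_setup_dma()
 * below sees the heaviest PEs first.
 */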
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10; 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pnv_ioda_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pnv_ioda_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device !\n",
				pci_name(dev));
			continue;
		}
		pci_dev_get(dev);
		pdn->pcidev = dev;
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}
/*
 * There are 2 types of PCI-bus-sensitive PEs: one is comprised of a
 * single PCI bus; the other contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE is normally
 * originated by a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Put PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA capable device exists
	 * below the bridge
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}
/*
 * Configure PEs so that the downstream PCI buses and devices
 * will have their associated PE#. Unfortunately, we haven't
 * figured out a way to identify PLX bridges yet, so we simply
 * put the PCI bus and the subordinates behind the root port
 * into one PE# here. This scheme is expected to change as
 * soon as we can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE#
	 * hasn't been assigned yet. Do nothing in
	 * that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc = 128 << 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}
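/*
 * Worked example for the p7ioc pair case (assumed addresses, for
 * illustration): with TCE_PCI_SWINV_PAIR set, inc = 16, so each store
 * to the invalidate register covers two 8-byte TCEs. For TCE entries at
 * physical 0x1000..0x104f, start becomes 0x1000 | (1ull << 63), and the
 * end |= inc - 1 rounding guarantees the final write still covers the
 * last (possibly partial) pair.
 */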
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	unsigned long start, end, inc;
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ul << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << 12);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << 12);
	inc = (0x1ul << 12);
	mb();

	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}
}
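/*
 * Layout sketch of the PHB3 invalidate word as built above (descriptive
 * of this code only): the top nibble carries the "DMA address in PE
 * scope" opcode (0x2), the low bits carry the PE number (masked to
 * 8 bits here), and the TCE index sits at bit 12. Stepping by
 * 0x1ul << 12 therefore walks the range one 4K page at a time.
 */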
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 u64 *startp, u64 *endp)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
}
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to OR in. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
			       TCE_PCI_SWINV_PAIR;
	}
	iommu_init_table(tbl, phb->hose->node);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
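/*
 * Size check for TCE32_TABLE_SIZE (simple arithmetic, stated for
 * clarity): a 256MB window of 4K pages needs 0x10000000 / 0x1000 = 65536
 * TCEs, and at 8 bytes each that is 512KB of table memory per segment.
 * A PE that receives N segments therefore allocates N * 512KB of
 * physically contiguous TCE table above.
 */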
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all of the possible 32-bit space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for the 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to OR in. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
	iommu_init_table(tbl, phb->hose->node);

	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the number of devices under that PE.
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight.
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}

		/*
		 * For IODA2 compliant PHB3, we needn't care about the weight.
		 * All of the available 32-bit DMA space will be assigned to
		 * the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}

		remaining -= segs;
		base += segs;
	}
}
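/*
 * Distribution example (assumed numbers, for illustration): with 16
 * segments, two DMA-capable PEs and a total weight tw = 20, residual is
 * 16 - 2 = 14. A weight-15 PE gets segs = 1 + (15 * 14 + 10) / 20 = 12,
 * leaving 4 segments; the weight-5 PE then wants 1 + (5 * 14 + 10) / 20
 * = 5 but is clamped to the 4 segments still remaining.
 */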
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	uint64_t addr64;
	uint32_t addr32, data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
	} else {
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = addr32;
	}
	msg->data = data;

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip is populated on first use.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}

		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
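/*
 * The "ibm,opal-msi-ranges" property consumed above is read as a pair of
 * cells, e.g. (illustrative values only):
 *
 *	ibm,opal-msi-ranges = <0x200 0x100>;
 *
 * meaning 0x100 MSIs starting at hardware IRQ 0x200; only the first two
 * cells are used here, as the base and count of the MSI bitmap.
 */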
/*
 * This function is supposed to be called on a per-PE basis, walking from
 * top to bottom, so that the I/O or MMIO segments assigned to a parent
 * PE can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now.
	 * PCI-device-based PEs, for example SR-IOV sensitive VFs,
	 * will be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end   = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			region.start = res->start -
				       hose->pci_mem_offset -
				       phb->ioda.m32_pci_base;
			region.end   = res->end -
				       hose->pci_mem_offset -
				       phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment#%d to PE#%d",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}
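/*
 * Mapping example (assumed sizes, for illustration): with an M32 segment
 * size of 8MB, a bus window that lands at relative offset 0x08000000 and
 * spans 16MB covers segment indices 16 and 17, so both segments are
 * pointed at this PE via OPAL_M32_WINDOW_TYPE and recorded in
 * m32_segmap[] by the loop above.
 */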
static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}
static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}
static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();
}
/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. For that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We'll need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
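/*
 * Effect sketch (assumed numbers, for illustration): with a 2GB M32
 * space split across 128 PEs, m32_segsize is 16MB, so a PE-sensitive
 * P2P bridge memory window is aligned to 16MB rather than the default
 * 1MB. Returning 1 in the nested-PCI-bridge case imposes no extra
 * constraint and lets the PCI core fall back to its defaults.
 */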
/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet, for example during resource
	 * reassignment in the PCI probe period. We just skip
	 * the check if the PEs aren't ready.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pnv_ioda_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
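/*
 * Example (for illustration): a device at 0002:04:00.1 has devfn 0x01 on
 * bus 4, so its RID is (4 << 8) | 0x01 = 0x0401, and pe_rmap[0x0401]
 * yields the owning PE number that pnv_ioda_configure_pe() recorded in
 * its reverse-map loop earlier.
 */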
void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
{
	struct pci_controller *hose;
	static int primary = 1;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;
	const u64 *prop64;
	const u32 *prop32;
	u64 phb_id;
	void *aux;
	long rc;

	pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (phb) {
		memset(phb, 0, sizeof(struct pnv_phb));
		phb->hose = hose = pcibios_alloc_controller(np);
	}
	if (!phb || !phb->hose) {
		pr_err("PCI: Failed to allocate PCI controller for %s\n",
		       np->full_name);
		return;
	}

	spin_lock_init(&phb->lock);
	/* XXX Use device-tree */
	hose->first_busno = 0;
	hose->last_busno = 0xff;
	hose->private_data = phb;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,p8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;

	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);
	primary = 0;

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");

	/* Initialize more IODA stuff */
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (!prop32)
		phb->ioda.total_pe = 1;
	else
		phb->ioda.total_pe = *prop32;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already carved the top 64K of M32 space off (MSI space);
	 * add it back so segment sizes are computed on the full window.
	 */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start -
		hose->pci_mem_offset;
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays
	 *
	 * XXX TODO: Don't allocate io segmap on PHB3
	 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	iomap_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;

#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. The expectation
	 * is that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * On IODA1 map everything to PE#0; on IODA2 we assume the IODA
	 * reset has cleared the RTT, which has the same effect.
	 */
	if (ioda_type == PNV_PHB_IODA1)
		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
}
void pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);
	}
}