arch/powerpc/platforms/powernv/pci-ioda.c
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/debug.h>

#include "powernv.h"
#include "pci.h"

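/*
 * Helper for printing messages prefixed with the PE number and, when
 * known, the name of the owning PCI device. Expanded below into
 * pe_err(), pe_warn() and pe_info().
 */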
#define define_pe_printk_level(func, kern_level)                \
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)     \
{                                                               \
        struct va_format vaf;                                   \
        va_list args;                                           \
        char pfix[32];                                          \
        int r;                                                  \
                                                                \
        va_start(args, fmt);                                    \
                                                                \
        vaf.fmt = fmt;                                          \
        vaf.va = &args;                                         \
                                                                \
        if (pe->pdev)                                           \
                strlcpy(pfix, dev_name(&pe->pdev->dev),         \
                        sizeof(pfix));                          \
        else                                                    \
                sprintf(pfix, "%04x:%02x     ",                 \
                        pci_domain_nr(pe->pbus),                \
                        pe->pbus->number);                      \
        r = printk(kern_level "pci %s: [PE# %.3d] %pV",         \
                   pfix, pe->pe_number, &vaf);                  \
                                                                \
        va_end(args);                                           \
                                                                \
        return r;                                               \
}

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);

/*
 * stdcix is only supposed to be used in hypervisor real mode as per
 * the architecture spec
 */
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
        __asm__ __volatile__("stdcix %0,0,%1"
                : : "r" (val), "r" (paddr) : "memory");
}

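/*
 * Allocate a free PE number from the PHB's allocation bitmap and
 * initialize the matching pe_array slot. Returns IODA_INVALID_PE
 * when no PE number is left.
 */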
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
        unsigned long pe;

        do {
                pe = find_next_zero_bit(phb->ioda.pe_alloc,
                                        phb->ioda.total_pe, 0);
                if (pe >= phb->ioda.total_pe)
                        return IODA_INVALID_PE;
        } while (test_and_set_bit(pe, phb->ioda.pe_alloc));

        phb->ioda.pe_array[pe].phb = phb;
        phb->ioda.pe_array[pe].pe_number = pe;
        return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
        WARN_ON(phb->ioda.pe_array[pe].pdev);

        memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
        clear_bit(pe, phb->ioda.pe_alloc);
}

/* Currently these two are only used when MSIs are enabled; this will
 * change, but in the meantime we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(dev);

        if (!pdn)
                return NULL;
        if (pdn->pe_number == IODA_INVALID_PE)
                return NULL;
        return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */

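/*
 * Push a PE into the hardware tables: compute the RID range it covers
 * (a single function, a single bus, or a bus plus all subordinates),
 * map it through OPAL into the PELT and PELT-V, set up the reverse
 * RID -> PE map, and set up an MVE on IODA1.
 */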
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
        struct pci_dev *parent;
        uint8_t bcomp, dcomp, fcomp;
        long rc, rid_end, rid;

        /* Bus validation ? */
        if (pe->pbus) {
                int count;

                dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
                fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
                parent = pe->pbus->self;
                if (pe->flags & PNV_IODA_PE_BUS_ALL)
                        count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
                else
                        count = 1;

                switch (count) {
                case  1: bcomp = OpalPciBusAll;         break;
                case  2: bcomp = OpalPciBus7Bits;       break;
                case  4: bcomp = OpalPciBus6Bits;       break;
                case  8: bcomp = OpalPciBus5Bits;       break;
                case 16: bcomp = OpalPciBus4Bits;       break;
                case 32: bcomp = OpalPciBus3Bits;       break;
                default:
                        pr_err("%s: Number of subordinate buses %d"
                               " unsupported\n",
                               pci_name(pe->pbus->self), count);
                        /* Do an exact match only */
                        bcomp = OpalPciBusAll;
                }
                rid_end = pe->rid + (count << 8);
        } else {
                parent = pe->pdev->bus->self;
                bcomp = OpalPciBusAll;
                dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
                fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
                rid_end = pe->rid + 1;
        }

        /*
         * Associate the PE in the PELT. We need to add the PE to the
         * corresponding PELT-V as well; otherwise an error originating
         * from the PE might affect other PEs.
         */
        rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
                             bcomp, dcomp, fcomp, OPAL_MAP_PE);
        if (rc) {
                pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
                return -ENXIO;
        }

        rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
                                pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
        if (rc)
                pe_warn(pe, "OPAL error %ld adding self to PELTV\n", rc);
        opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
                                  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

        /* Add to all parents' PELT-V */
        while (parent) {
                struct pci_dn *pdn = pci_get_pdn(parent);
                if (pdn && pdn->pe_number != IODA_INVALID_PE) {
                        rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
                                                pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
                        /* XXX What to do in case of error ? */
                }
                parent = parent->bus->self;
        }
        /* Setup reverse map */
        for (rid = pe->rid; rid < rid_end; rid++)
                phb->ioda.pe_rmap[rid] = pe->pe_number;

        /* Set up one MVE on IODA1 */
        if (phb->type == PNV_PHB_IODA1) {
                pe->mve_number = pe->pe_number;
                rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
                                      pe->pe_number);
                if (rc) {
                        pe_err(pe, "OPAL error %ld setting up MVE %d\n",
                               rc, pe->mve_number);
                        pe->mve_number = -1;
                } else {
                        rc = opal_pci_set_mve_enable(phb->opal_id,
                                                     pe->mve_number, OPAL_ENABLE_MVE);
                        if (rc) {
                                pe_err(pe, "OPAL error %ld enabling MVE %d\n",
                                       rc, pe->mve_number);
                                pe->mve_number = -1;
                        }
                }
        } else if (phb->type == PNV_PHB_IODA2)
                pe->mve_number = 0;

        return 0;
}

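/* Keep the PHB's PE DMA list sorted by decreasing DMA weight */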
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
                                       struct pnv_ioda_pe *pe)
{
        struct pnv_ioda_pe *lpe;

        list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
                if (lpe->dma_weight < pe->dma_weight) {
                        list_add_tail(&pe->dma_link, &lpe->dma_link);
                        return;
                }
        }
        list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}

static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
        /* This is quite simplistic. The "base" weight of a device
         * is 10. 0 means no DMA is to be accounted for it.
         */

        /* If it's a bridge, no DMA */
        if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return 0;

        /* Reduce the weight of slow USB controllers */
        if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
            dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
            dev->class == PCI_CLASS_SERIAL_USB_EHCI)
                return 3;

        /* Increase the weight of RAID (includes Obsidian) */
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
                return 15;

        /* Default */
        return 10;
}

#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(dev);
        struct pnv_ioda_pe *pe;
        int pe_num;

        if (!pdn) {
                pr_err("%s: Device tree node not associated properly\n",
                           pci_name(dev));
                return NULL;
        }
        if (pdn->pe_number != IODA_INVALID_PE)
                return NULL;

        /* PE#0 has been pre-set */
        if (dev->bus->number == 0)
                pe_num = 0;
        else
                pe_num = pnv_ioda_alloc_pe(phb);
        if (pe_num == IODA_INVALID_PE) {
                pr_warning("%s: Not enough PE# available, disabling device\n",
                           pci_name(dev));
                return NULL;
        }

        /* NOTE: We get only one ref to the pci_dev for the pdn, not for the
         * pointer in the PE data structure, both should be destroyed at the
         * same time. However, this needs to be looked at more closely again
         * once we actually start removing things (Hotplug, SR-IOV, ...)
         *
         * At some point we want to remove the PDN completely anyways
         */
        pe = &phb->ioda.pe_array[pe_num];
        pci_dev_get(dev);
        pdn->pcidev = dev;
        pdn->pe_number = pe_num;
        pe->pdev = dev;
        pe->pbus = NULL;
        pe->tce32_seg = -1;
        pe->mve_number = -1;
        pe->rid = dev->bus->number << 8 | pdn->devfn;

        pe_info(pe, "Associated device to PE\n");

        if (pnv_ioda_configure_pe(phb, pe)) {
                /* XXX What do we do here ? */
                if (pe_num)
                        pnv_ioda_free_pe(phb, pe_num);
                pdn->pe_number = IODA_INVALID_PE;
                pe->pdev = NULL;
                pci_dev_put(dev);
                return NULL;
        }

        /* Assign a DMA weight to the device */
        pe->dma_weight = pnv_ioda_dma_weight(dev);
        if (pe->dma_weight != 0) {
                phb->ioda.dma_weight += pe->dma_weight;
                phb->ioda.dma_pe_count++;
        }

        /* Link the PE */
        pnv_ioda_link_pe_by_weight(phb, pe);

        return pe;
}
#endif /* Useful for SRIOV case */

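/*
 * Associate every device on the bus (and, for "ALL" PEs, on its
 * subordinate buses) with the given PE and accumulate the device DMA
 * weights into the PE's weight.
 */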
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                struct pci_dn *pdn = pci_get_pdn(dev);

                if (pdn == NULL) {
                        pr_warn("%s: No device node associated with device !\n",
                                pci_name(dev));
                        continue;
                }
                pci_dev_get(dev);
                pdn->pcidev = dev;
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
                if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
                        pnv_ioda_setup_same_PE(dev->subordinate, pe);
        }
}

/*
 * There are 2 types of PCI-bus-sensitive PEs: one that comprises a
 * single PCI bus, and another that contains the primary PCI bus and
 * its subordinate PCI devices and buses. The second type of PE is
 * normally originated by a PCIe-to-PCI bridge or a PLX switch
 * downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct pnv_phb *phb = hose->private_data;
        struct pnv_ioda_pe *pe;
        int pe_num;

        pe_num = pnv_ioda_alloc_pe(phb);
        if (pe_num == IODA_INVALID_PE) {
                pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
                        __func__, pci_domain_nr(bus), bus->number);
                return;
        }

        pe = &phb->ioda.pe_array[pe_num];
        pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
        pe->pbus = bus;
        pe->pdev = NULL;
        pe->tce32_seg = -1;
        pe->mve_number = -1;
        pe->rid = bus->busn_res.start << 8;
        pe->dma_weight = 0;

        if (all)
                pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
                        bus->busn_res.start, bus->busn_res.end, pe_num);
        else
                pe_info(pe, "Secondary bus %d associated with PE#%d\n",
                        bus->busn_res.start, pe_num);

        if (pnv_ioda_configure_pe(phb, pe)) {
                /* XXX What do we do here ? */
                if (pe_num)
                        pnv_ioda_free_pe(phb, pe_num);
                pe->pbus = NULL;
                return;
        }

        /* Associate it with all child devices */
        pnv_ioda_setup_same_PE(bus, pe);

        /* Put PE to the list */
        list_add_tail(&pe->list, &phb->ioda.pe_list);

        /* Account for one DMA PE if at least one DMA capable device exists
         * below the bridge
         */
        if (pe->dma_weight != 0) {
                phb->ioda.dma_weight += pe->dma_weight;
                phb->ioda.dma_pe_count++;
        }

        /* Link the PE */
        pnv_ioda_link_pe_by_weight(phb, pe);
}

static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
        struct pci_dev *dev;

        pnv_ioda_setup_bus_PE(bus, 0);

        list_for_each_entry(dev, &bus->devices, bus_list) {
                if (dev->subordinate) {
                        if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
                                pnv_ioda_setup_bus_PE(dev->subordinate, 1);
                        else
                                pnv_ioda_setup_PEs(dev->subordinate);
                }
        }
}

/*
 * Configure PEs so that the downstream PCI buses and devices
 * have their associated PE#. Unfortunately, we haven't figured
 * out a way to identify PLX bridges yet, so we simply put each
 * PCI bus and the subordinates behind the root port into a PE#
 * here. This is expected to change as soon as we can detect
 * PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
        struct pci_controller *hose, *tmp;

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                pnv_ioda_setup_PEs(hose->bus);
        }
}

static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;

        /*
         * The function can be called while the PE#
         * hasn't been assigned yet. Do nothing in
         * that case.
         */
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return;

        pe = &phb->ioda.pe_array[pdn->pe_number];
        WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
        set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
}

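/*
 * Select the DMA ops for a device based on the requested mask: if the
 * PE has a bypass window enabled and the mask covers all of memory
 * above the bypass base, use direct (untranslated) DMA; otherwise
 * fall back to the 32-bit TCE table.
 */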
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
                                     struct pci_dev *pdev, u64 dma_mask)
{
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pnv_ioda_pe *pe;
        uint64_t top;
        bool bypass = false;

        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                return -ENODEV;

        pe = &phb->ioda.pe_array[pdn->pe_number];
        if (pe->tce_bypass_enabled) {
                top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
                bypass = (dma_mask >= top);
        }

        if (bypass) {
                dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
                set_dma_ops(&pdev->dev, &dma_direct_ops);
                set_dma_offset(&pdev->dev, pe->tce_bypass_base);
        } else {
                dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
                set_dma_ops(&pdev->dev, &dma_iommu_ops);
                set_iommu_table_base(&pdev->dev, &pe->tce32_table);
        }
        *pdev->dev.dma_mask = dma_mask;
        return 0;
}

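/* Point every device below the PE's bus at the PE's 32-bit TCE table */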
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                set_iommu_table_base_and_group(&dev->dev, &pe->tce32_table);
                if (dev->subordinate)
                        pnv_ioda_setup_bus_dma(pe, dev->subordinate);
        }
}

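/*
 * Invalidate a range of TCEs on IODA1-style hardware by writing
 * addresses derived from the updated TCE entries to the invalidation
 * register, optionally from hypervisor real mode (rm).
 */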
static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
                                         struct iommu_table *tbl,
                                         __be64 *startp, __be64 *endp, bool rm)
{
        __be64 __iomem *invalidate = rm ?
                (__be64 __iomem *)pe->tce_inval_reg_phys :
                (__be64 __iomem *)tbl->it_index;
        unsigned long start, end, inc;

        start = __pa(startp);
        end = __pa(endp);

        /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
        if (tbl->it_busno) {
                start <<= 12;
                end <<= 12;
                inc = 128 << 12;
                start |= tbl->it_busno;
                end |= tbl->it_busno;
        } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
                /* p7ioc-style invalidation, 2 TCEs per write */
                start |= (1ull << 63);
                end |= (1ull << 63);
                inc = 16;
        } else {
                /* Default (older HW) */
                inc = 128;
        }

        end |= inc - 1; /* round up end to be different than start */

        mb(); /* Ensure above stores are visible */
        while (start <= end) {
                if (rm)
                        __raw_rm_writeq(cpu_to_be64(start), invalidate);
                else
                        __raw_writeq(cpu_to_be64(start), invalidate);
                start += inc;
        }

        /*
         * The iommu layer will do another mb() for us on build()
         * and we don't care on free()
         */
}

static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
                                         struct iommu_table *tbl,
                                         __be64 *startp, __be64 *endp, bool rm)
{
        unsigned long start, end, inc;
        __be64 __iomem *invalidate = rm ?
                (__be64 __iomem *)pe->tce_inval_reg_phys :
                (__be64 __iomem *)tbl->it_index;

        /* We'll invalidate DMA addresses in PE scope */
        start = 0x2ul << 60;
        start |= (pe->pe_number & 0xFF);
        end = start;

        /* Figure out the start, end and step */
        inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
        start |= (inc << 12);
        inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
        end |= (inc << 12);
        inc = (0x1ul << 12);
        mb();

        while (start <= end) {
                if (rm)
                        __raw_rm_writeq(cpu_to_be64(start), invalidate);
                else
                        __raw_writeq(cpu_to_be64(start), invalidate);
                start += inc;
        }
}

void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
                                 __be64 *startp, __be64 *endp, bool rm)
{
        struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
                                              tce32_table);
        struct pnv_phb *phb = pe->phb;

        if (phb->type == PNV_PHB_IODA1)
                pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
        else
                pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
}

static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
                                      struct pnv_ioda_pe *pe, unsigned int base,
                                      unsigned int segs)
{
        struct page *tce_mem = NULL;
        const __be64 *swinvp;
        struct iommu_table *tbl;
        unsigned int i;
        int64_t rc;
        void *addr;

        /* 256M DMA window, 4K TCE pages, 8-byte TCEs */
#define TCE32_TABLE_SIZE        ((0x10000000 / 0x1000) * 8)

        /* XXX FIXME: Handle 64-bit only DMA devices */
        /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
        /* XXX FIXME: Allocate multi-level tables on PHB3 */

        /* We shouldn't already have a 32-bit DMA associated */
        if (WARN_ON(pe->tce32_seg >= 0))
                return;

        /* Grab a 32-bit TCE table */
        pe->tce32_seg = base;
        pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
                (base << 28), ((base + segs) << 28) - 1);

        /* XXX Currently, we allocate one big contiguous table for the
         * TCEs. We only really need one chunk per 256M of TCE space
         * (ie per segment) but that's an optimization for later, it
         * requires some added smarts with our get/put_tce implementation
         */
        tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
                                   get_order(TCE32_TABLE_SIZE * segs));
        if (!tce_mem) {
                pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
                goto fail;
        }
        addr = page_address(tce_mem);
        memset(addr, 0, TCE32_TABLE_SIZE * segs);

        /* Configure HW */
        for (i = 0; i < segs; i++) {
                rc = opal_pci_map_pe_dma_window(phb->opal_id,
                                              pe->pe_number,
                                              base + i, 1,
                                              __pa(addr) + TCE32_TABLE_SIZE * i,
                                              TCE32_TABLE_SIZE, 0x1000);
                if (rc) {
                        pe_err(pe, " Failed to configure 32-bit TCE table,"
                               " err %ld\n", rc);
                        goto fail;
                }
        }

        /* Setup linux iommu table */
        tbl = &pe->tce32_table;
        pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
                                  base << 28);

        /* OPAL variant of P7IOC SW invalidated TCEs */
        swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
        if (swinvp) {
                /* We need a couple more fields -- an address and a data
                 * value to OR in.  Since the bus is only printed out on
                 * table free errors, and on the first pass the data will
                 * be a relative bus number, print that out instead.
                 */
                tbl->it_busno = 0;
                pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
                tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
                                8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
                               TCE_PCI_SWINV_PAIR;
        }
        iommu_init_table(tbl, phb->hose->node);
        iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);

        if (pe->pdev)
                set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
        else
                pnv_ioda_setup_bus_dma(pe, pe->pbus);

        return;
 fail:
        /* XXX Failure: Try to fallback to 64-bit only ? */
        if (pe->tce32_seg >= 0)
                pe->tce32_seg = -1;
        if (tce_mem)
                __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}

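/*
 * Enable or disable the 64-bit "bypass" DMA window (TVE #1), which
 * maps PCI addresses above tce_bypass_base straight onto system
 * memory with no TCE translation.
 */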
static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
        struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
                                              tce32_table);
        uint16_t window_id = (pe->pe_number << 1) + 1;
        int64_t rc;

        pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
        if (enable) {
                phys_addr_t top = memblock_end_of_DRAM();

                top = roundup_pow_of_two(top);
                rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
                                                     pe->pe_number,
                                                     window_id,
                                                     pe->tce_bypass_base,
                                                     top);
        } else {
                rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
                                                     pe->pe_number,
                                                     window_id,
                                                     pe->tce_bypass_base,
                                                     0);

                /*
                 * We might want to reset the DMA ops of all devices on
                 * this PE. However in theory, that shouldn't be necessary
                 * as this is used for VFIO/KVM pass-through and the device
                 * hasn't yet been returned to its kernel driver
                 */
        }
        if (rc)
                pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
        else
                pe->tce_bypass_enabled = enable;
}

static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
                                          struct pnv_ioda_pe *pe)
{
        /* TVE #1 is selected by PCI address bit 59 */
        pe->tce_bypass_base = 1ull << 59;

        /* Install set_bypass callback for VFIO */
        pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;

        /* Enable bypass by default */
        pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
}

static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
                                       struct pnv_ioda_pe *pe)
{
        struct page *tce_mem = NULL;
        void *addr;
        const __be64 *swinvp;
        struct iommu_table *tbl;
        unsigned int tce_table_size, end;
        int64_t rc;

        /* We shouldn't already have a 32-bit DMA associated */
        if (WARN_ON(pe->tce32_seg >= 0))
                return;

        /* The PE will reserve all possible 32-bit space */
        pe->tce32_seg = 0;
        end = (1 << ilog2(phb->ioda.m32_pci_base));
        tce_table_size = (end / 0x1000) * 8;
        pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
                end);

        /* Allocate TCE table */
        tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
                                   get_order(tce_table_size));
        if (!tce_mem) {
                pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
                goto fail;
        }
        addr = page_address(tce_mem);
        memset(addr, 0, tce_table_size);

        /*
         * Map the TCE table through the TVT. The TVE index is the PE
         * number shifted by 1 bit for the 32-bit DMA space.
         */
        rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
                                        pe->pe_number << 1, 1, __pa(addr),
                                        tce_table_size, 0x1000);
        if (rc) {
                pe_err(pe, "Failed to configure 32-bit TCE table,"
                       " err %ld\n", rc);
                goto fail;
        }

        /* Setup linux iommu table */
        tbl = &pe->tce32_table;
        pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

        /* OPAL variant of PHB3 invalidated TCEs */
        swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
        if (swinvp) {
                /* We need a couple more fields -- an address and a data
                 * value to OR in.  Since the bus is only printed out on
                 * table free errors, and on the first pass the data will
                 * be a relative bus number, print that out instead.
                 */
                tbl->it_busno = 0;
                pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
                tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
                                8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
        }
        iommu_init_table(tbl, phb->hose->node);
        iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);

        if (pe->pdev)
                set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
        else
                pnv_ioda_setup_bus_dma(pe, pe->pbus);

        /* Also create a bypass window */
        pnv_pci_ioda2_setup_bypass_pe(phb, pe);
        return;
fail:
        if (pe->tce32_seg >= 0)
                pe->tce32_seg = -1;
        if (tce_mem)
                __free_pages(tce_mem, get_order(tce_table_size));
}

static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
        struct pci_controller *hose = phb->hose;
        unsigned int residual, remaining, segs, tw, base;
        struct pnv_ioda_pe *pe;

        /* If we have more PE# than segments available, hand out one
         * per PE until we run out and let the rest fail. If not,
         * then we assign at least one segment per PE, plus more based
         * on the number of devices under that PE
         */
        if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
                residual = 0;
        else
                residual = phb->ioda.tce32_count -
                        phb->ioda.dma_pe_count;

        pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
                hose->global_number, phb->ioda.tce32_count);
        pr_info("PCI: %d PE# for a total weight of %d\n",
                phb->ioda.dma_pe_count, phb->ioda.dma_weight);

        /* Walk our PE list and configure their DMA segments, hand them
         * out one base segment plus any residual segments based on
         * weight
         */
        remaining = phb->ioda.tce32_count;
        tw = phb->ioda.dma_weight;
        base = 0;
        list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
                if (!pe->dma_weight)
                        continue;
                if (!remaining) {
                        pe_warn(pe, "No DMA32 resources available\n");
                        continue;
                }
                segs = 1;
                if (residual) {
                        segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
                        if (segs > remaining)
                                segs = remaining;
                }

                /*
                 * For an IODA2-compliant PHB3, we needn't care about
                 * the weight: all of the available 32-bit DMA space
                 * will be assigned to the PE.
                 */
                if (phb->type == PNV_PHB_IODA1) {
                        pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
                                pe->dma_weight, segs);
                        pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
                } else {
                        pe_info(pe, "Assign DMA32 space\n");
                        segs = 0;
                        pnv_pci_ioda2_setup_dma_pe(phb, pe);
                }

                remaining -= segs;
                base += segs;
        }
}

#ifdef CONFIG_PCI_MSI
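/*
 * On PHB3 (IODA2) an MSI EOI needs an extra opal_pci_msi_eoi() call
 * ahead of the usual XICS EOI, which is what this irq_eoi override
 * provides.
 */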
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        struct irq_chip *chip = irq_data_get_irq_chip(d);
        struct pnv_phb *phb = container_of(chip, struct pnv_phb,
                                           ioda.irq_chip);
        int64_t rc;

        rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
        WARN_ON_ONCE(rc);

        icp_native_eoi(d);
}

static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                  unsigned int hwirq, unsigned int virq,
                                  unsigned int is_64, struct msi_msg *msg)
{
        struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
        struct pci_dn *pdn = pci_get_pdn(dev);
        struct irq_data *idata;
        struct irq_chip *ichip;
        unsigned int xive_num = hwirq - phb->msi_base;
        __be32 data;
        int rc;

        /* No PE assigned ? bail out ... no MSI for you ! */
        if (pe == NULL)
                return -ENXIO;

        /* Check if we have an MVE */
        if (pe->mve_number < 0)
                return -ENXIO;

        /* Force 32-bit MSI on some broken devices */
        if (pdn && pdn->force_32bit_msi)
                is_64 = 0;

        /* Assign XIVE to PE */
        rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
        if (rc) {
                pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
                        pci_name(dev), rc, xive_num);
                return -EIO;
        }

        if (is_64) {
                __be64 addr64;

                rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
                                     &addr64, &data);
                if (rc) {
                        pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
                                pci_name(dev), rc);
                        return -EIO;
                }
                msg->address_hi = be64_to_cpu(addr64) >> 32;
                msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
        } else {
                __be32 addr32;

                rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
                                     &addr32, &data);
                if (rc) {
                        pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
                                pci_name(dev), rc);
                        return -EIO;
                }
                msg->address_hi = 0;
                msg->address_lo = be32_to_cpu(addr32);
        }
        msg->data = be32_to_cpu(data);

        /*
         * Change the IRQ chip for MSI interrupts on PHB3.
         * The corresponding IRQ chip is populated the first
         * time through.
         */
        if (phb->type == PNV_PHB_IODA2) {
                if (!phb->ioda.irq_chip_init) {
                        idata = irq_get_irq_data(virq);
                        ichip = irq_data_get_irq_chip(idata);
                        phb->ioda.irq_chip_init = 1;
                        phb->ioda.irq_chip = *ichip;
                        phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
                }

                irq_set_chip(virq, &phb->ioda.irq_chip);
        }

        pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
                 " address=%x_%08x data=%x PE# %d\n",
                 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
                 msg->address_hi, msg->address_lo, msg->data, pe->pe_number);

        return 0;
}

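/*
 * Read the MSI range assigned to this PHB from the device tree and
 * set up the MSI bitmap and setup callback accordingly.
 */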
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
        unsigned int count;
        const __be32 *prop = of_get_property(phb->hose->dn,
                                             "ibm,opal-msi-ranges", NULL);
        if (!prop) {
                /* BML Fallback */
                prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
        }
        if (!prop)
                return;

        phb->msi_base = be32_to_cpup(prop);
        count = be32_to_cpup(prop + 1);
        if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
                pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
                       phb->hose->global_number);
                return;
        }

        phb->msi_setup = pnv_pci_ioda_msi_setup;
        phb->msi32_support = 1;
        pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
                count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */

/*
 * This function is supposed to be called on a per-PE basis, from
 * top to bottom, so that the I/O or MMIO segment assigned to a
 * parent PE can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
                                  struct pnv_ioda_pe *pe)
{
        struct pnv_phb *phb = hose->private_data;
        struct pci_bus_region region;
        struct resource *res;
        int i, index;
        int rc;

        /*
         * NOTE: We only care about PCI-bus-based PEs for now.
         * PCI-device-based PEs, for example SR-IOV-sensitive
         * VFs, should be figured out later.
         */
        BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

        pci_bus_for_each_resource(pe->pbus, res, i) {
                if (!res || !res->flags ||
                    res->start > res->end)
                        continue;

                if (res->flags & IORESOURCE_IO) {
                        region.start = res->start - phb->ioda.io_pci_base;
                        region.end   = res->end - phb->ioda.io_pci_base;
                        index = region.start / phb->ioda.io_segsize;

                        while (index < phb->ioda.total_pe &&
                               region.start <= region.end) {
                                phb->ioda.io_segmap[index] = pe->pe_number;
                                rc = opal_pci_map_pe_mmio_window(phb->opal_id,
                                        pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
                                if (rc != OPAL_SUCCESS) {
                                        pr_err("%s: OPAL error %d when mapping IO "
                                               "segment #%d to PE#%d\n",
                                               __func__, rc, index, pe->pe_number);
                                        break;
                                }

                                region.start += phb->ioda.io_segsize;
                                index++;
                        }
                } else if (res->flags & IORESOURCE_MEM) {
                        /* WARNING: Assumes M32 is mem region 0 in PHB. We need to
                         * harden that algorithm when we start supporting M64
                         */
                        region.start = res->start -
                                       hose->mem_offset[0] -
                                       phb->ioda.m32_pci_base;
                        region.end   = res->end -
                                       hose->mem_offset[0] -
                                       phb->ioda.m32_pci_base;
                        index = region.start / phb->ioda.m32_segsize;

                        while (index < phb->ioda.total_pe &&
                               region.start <= region.end) {
                                phb->ioda.m32_segmap[index] = pe->pe_number;
                                rc = opal_pci_map_pe_mmio_window(phb->opal_id,
                                        pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
                                if (rc != OPAL_SUCCESS) {
                                        pr_err("%s: OPAL error %d when mapping M32 "
                                               "segment#%d to PE#%d",
                                               __func__, rc, index, pe->pe_number);
                                        break;
                                }

                                region.start += phb->ioda.m32_segsize;
                                index++;
                        }
                }
        }
}

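/* Map the I/O and M32 segments of every PE on every PHB */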
static void pnv_pci_ioda_setup_seg(void)
{
        struct pci_controller *tmp, *hose;
        struct pnv_phb *phb;
        struct pnv_ioda_pe *pe;

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                phb = hose->private_data;
                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                        pnv_ioda_setup_pe_seg(hose, pe);
                }
        }
}

static void pnv_pci_ioda_setup_DMA(void)
{
        struct pci_controller *hose, *tmp;
        struct pnv_phb *phb;

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                pnv_ioda_setup_dma(hose->private_data);

                /* Mark the PHB initialization done */
                phb = hose->private_data;
                phb->initialized = 1;
        }
}

static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
        struct pci_controller *hose, *tmp;
        struct pnv_phb *phb;
        char name[16];

        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                phb = hose->private_data;

                sprintf(name, "PCI%04x", hose->global_number);
                phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
                if (!phb->dbgfs)
                        pr_warning("%s: Error on creating debugfs on PHB#%x\n",
                                __func__, hose->global_number);
        }
#endif /* CONFIG_DEBUG_FS */
}

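/*
 * Final fixup pass, run once PCI probing completes: assign PEs, map
 * the I/O and M32 segments, set up DMA, then bring up debugfs and EEH.
 */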
static void pnv_pci_ioda_fixup(void)
{
        pnv_pci_ioda_setup_PEs();
        pnv_pci_ioda_setup_seg();
        pnv_pci_ioda_setup_DMA();

        pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
        eeh_probe_mode_set(EEH_PROBE_MODE_DEV);
        eeh_addr_cache_build();
        eeh_init();
#endif
}

/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE-sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * needn't enlarge the alignment, which saves some resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
                                                unsigned long type)
{
        struct pci_dev *bridge;
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct pnv_phb *phb = hose->private_data;
        int num_pci_bridges = 0;

        bridge = bus->self;
        while (bridge) {
                if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
                        num_pci_bridges++;
                        if (num_pci_bridges >= 2)
                                return 1;
                }

                bridge = bridge->bus->self;
        }

        /* We need to support prefetchable memory windows later */
        if (type & IORESOURCE_MEM)
                return phb->ioda.m32_segsize;

        return phb->ioda.io_segsize;
}

/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn;

        /* The function may be called while the PEs have
         * not been created yet, for example during resource
         * reassignment in the PCI probe period. We just skip
         * the check if the PEs aren't ready.
         */
        if (!phb->initialized)
                return 0;

        pdn = pci_get_pdn(dev);
        if (!pdn || pdn->pe_number == IODA_INVALID_PE)
                return -EINVAL;

        return 0;
}

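/* Reverse map from a bus number/devfn (i.e. the RID) to the PE number */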
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
                               u32 devfn)
{
        return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}

static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
        opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
                       OPAL_ASSERT_RESET);
}

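/*
 * Instantiate a PHB from its device-tree node: allocate the PCI
 * controller and the IODA state (PE bitmap, segment maps, PE array),
 * hook up the pcibios callbacks, and reset the IODA tables to a
 * clean state.
 */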
void __init pnv_pci_init_ioda_phb(struct device_node *np,
                                  u64 hub_id, int ioda_type)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        unsigned long size, m32map_off, pemap_off, iomap_off = 0;
        const __be64 *prop64;
        const __be32 *prop32;
        int len;
        u64 phb_id;
        void *aux;
        long rc;

        pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

        prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
        if (!prop64) {
                pr_err("  Missing \"ibm,opal-phbid\" property !\n");
                return;
        }
        phb_id = be64_to_cpup(prop64);
        pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

        phb = alloc_bootmem(sizeof(struct pnv_phb));
        if (!phb) {
                pr_err("  Out of memory !\n");
                return;
        }

        /* Allocate PCI controller */
        memset(phb, 0, sizeof(struct pnv_phb));
        phb->hose = hose = pcibios_alloc_controller(np);
        if (!phb->hose) {
                pr_err("  Can't allocate PCI controller for %s\n",
                       np->full_name);
                free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
                return;
        }

        spin_lock_init(&phb->lock);
        prop32 = of_get_property(np, "bus-range", &len);
        if (prop32 && len == 8) {
                hose->first_busno = be32_to_cpu(prop32[0]);
                hose->last_busno = be32_to_cpu(prop32[1]);
        } else {
                pr_warn("  Broken <bus-range> on %s\n", np->full_name);
                hose->first_busno = 0;
                hose->last_busno = 0xff;
        }
        hose->private_data = phb;
        phb->hub_id = hub_id;
        phb->opal_id = phb_id;
        phb->type = ioda_type;

        /* Detect specific models for error handling */
        if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
                phb->model = PNV_PHB_MODEL_P7IOC;
        else if (of_device_is_compatible(np, "ibm,power8-pciex"))
                phb->model = PNV_PHB_MODEL_PHB3;
        else
                phb->model = PNV_PHB_MODEL_UNKNOWN;

        /* Parse 32-bit and IO ranges (if any) */
        pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

        /* Get registers */
        phb->regs = of_iomap(np, 0);
        if (phb->regs == NULL)
                pr_err("  Failed to map registers !\n");

        /* Initialize more IODA stuff */
        phb->ioda.total_pe = 1;
        prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
        if (prop32)
                phb->ioda.total_pe = be32_to_cpup(prop32);
        prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
        if (prop32)
                phb->ioda.reserved_pe = be32_to_cpup(prop32);
        phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
        /* FW has already chopped the top 64K of the M32 space off (MSI space) */
        phb->ioda.m32_size += 0x10000;

        phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
        phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
        phb->ioda.io_size = hose->pci_io_size;
        phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
        phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

        /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
        size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
        m32map_off = size;
        size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
        if (phb->type == PNV_PHB_IODA1) {
                iomap_off = size;
                size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
        }
        pemap_off = size;
        size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
        aux = alloc_bootmem(size);
        memset(aux, 0, size);
        phb->ioda.pe_alloc = aux;
        phb->ioda.m32_segmap = aux + m32map_off;
        if (phb->type == PNV_PHB_IODA1)
                phb->ioda.io_segmap = aux + iomap_off;
        phb->ioda.pe_array = aux + pemap_off;
        set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc);

        INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
        INIT_LIST_HEAD(&phb->ioda.pe_list);

        /* Calculate how many 32-bit TCE segments we have */
        phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

        /* Clear unusable m64 */
        hose->mem_resources[1].flags = 0;
        hose->mem_resources[1].start = 0;
        hose->mem_resources[1].end = 0;
        hose->mem_resources[2].flags = 0;
        hose->mem_resources[2].start = 0;
        hose->mem_resources[2].end = 0;

#if 0 /* We should really do that ... */
        rc = opal_pci_set_phb_mem_window(opal->phb_id,
                                         window_type,
                                         window_num,
                                         starting_real_address,
                                         starting_pci_address,
                                         segment_size);
#endif

        pr_info("  %d (%d) PE's M32: 0x%x [segment=0x%x]"
                " IO: 0x%x [segment=0x%x]\n",
                phb->ioda.total_pe,
                phb->ioda.reserved_pe,
                phb->ioda.m32_size, phb->ioda.m32_segsize,
                phb->ioda.io_size, phb->ioda.io_segsize);

        phb->hose->ops = &pnv_pci_ops;
#ifdef CONFIG_EEH
        phb->eeh_ops = &ioda_eeh_ops;
#endif

        /* Setup RID -> PE mapping function */
        phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

        /* Setup TCEs */
        phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
        phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;

        /* Setup shutdown function for kexec */
        phb->shutdown = pnv_pci_ioda_shutdown;

        /* Setup MSI support */
        pnv_pci_init_ioda_msis(phb);

        /*
         * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
         * to let the PCI core do resource assignment. The PCI core
         * is expected to apply the correct I/O and MMIO alignment
         * to the P2P bridge BARs so that each PCI bus (excluding
         * the child P2P bridges) can form an individual PE.
         */
        ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
        ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
        ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
        pci_add_flags(PCI_REASSIGN_ALL_RSRC);

        /* Reset IODA tables to a clean state */
        rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
        if (rc)
                pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);
}

void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
        pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}

void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
        struct device_node *phbn;
        const __be64 *prop64;
        u64 hub_id;

        pr_info("Probing IODA IO-Hub %s\n", np->full_name);

        prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
        if (!prop64) {
                pr_err(" Missing \"ibm,opal-hubid\" property !\n");
                return;
        }
        hub_id = be64_to_cpup(prop64);
        pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

        /* Count child PHBs */
        for_each_child_of_node(np, phbn) {
                /* Look for IODA1 PHBs */
                if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
                        pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
        }
}