struct pnv_ioda_pe *pe;
struct pci_dn *pdn;
- if (npe->flags & PNV_IODA_PE_PEER) {
- pe = npe->peers[0];
- pdev = pe->pdev;
- } else {
- pdev = pnv_pci_get_gpu_dev(npe->pdev);
- if (!pdev)
- return NULL;
-
- pdn = pci_get_pdn(pdev);
- if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
- return NULL;
- hose = pci_bus_to_host(pdev->bus);
- phb = hose->private_data;
- pe = &phb->ioda.pe_array[pdn->pe_number];
- }
+ pdev = pnv_pci_get_gpu_dev(npe->pdev);
+ if (!pdev)
+ return NULL;
+
+ pdn = pci_get_pdn(pdev);
+ if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+ return NULL;
+
+ hose = pci_bus_to_host(pdev->bus);
+ phb = hose->private_data;
+ pe = &phb->ioda.pe_array[pdn->pe_number];
if (gpdev)
*gpdev = pdev;
}
pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ /* Add the table to the list so its TCE cache will get invalidated */
+ pnv_pci_link_table_and_group(phb->hose->node, 0,
+ tbl, &npe->table_group);
+
return 0;
}
}
pnv_pci_ioda2_tce_invalidate_entire(phb, false);
- return 0;
-}
-
-void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
-{
- struct pnv_ioda_pe *gpe;
- struct pci_dev *gpdev;
- int i, avail = -1;
-
- if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
- return;
-
- gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
- if (!gpe)
- return;
-
- for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
- /* Nothing to do if the PE is already connected. */
- if (gpe->peers[i] == npe)
- return;
-
- if (!gpe->peers[i])
- avail = i;
- }
- if (WARN_ON(avail < 0))
- return;
-
- gpe->peers[avail] = npe;
- gpe->flags |= PNV_IODA_PE_PEER;
-
- /*
- * We assume that the NPU devices only have a single peer PE
- * (the GPU PCIe device PE).
- */
- npe->peers[0] = gpe;
- npe->flags |= PNV_IODA_PE_PEER;
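+ /* Remove the table from the list so its TCE cache no longer gets invalidated */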
+ pnv_pci_unlink_table_and_group(npe->table_group.tables[0],
+ &npe->table_group);
+ return 0;
}
/*
npe->pe_number, npe->pe_number,
0 /* bypass base */, top);
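+ /* Flush the entire NPU TCE cache if the bypass window was updated successfully */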
+ if (rc == OPAL_SUCCESS)
+ pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+
return rc;
}
/* 01xb - invalidate TCEs that match the specified PE# */
unsigned long val = TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
struct pnv_phb *phb = pe->phb;
- struct pnv_ioda_pe *npe;
- int i;
if (!phb->ioda.tce_inval_reg)
return;
mb(); /* Ensure above stores are visible */
__raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
-
- if (pe->flags & PNV_IODA_PE_PEER)
- for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
- npe = pe->peers[i];
- if (!npe || npe->phb->type != PNV_PHB_NPU)
- continue;
-
- pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
- }
}
static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
struct iommu_table_group_link *tgl;
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
- struct pnv_ioda_pe *npe;
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
__be64 __iomem *invalidate = rm ?
(__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
pe->phb->ioda.tce_inval_reg;
- int i;
-
- pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
- invalidate, tbl->it_page_shift,
- index, npages);
- if (pe->flags & PNV_IODA_PE_PEER)
+ if (pe->phb->type == PNV_PHB_NPU) {
/*
* The NVLink hardware does not support TCE kill
* per TCE entry so we have to invalidate
* the entire cache for it.
*/
- for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
- npe = pe->peers[i];
- if (!npe || npe->phb->type != PNV_PHB_NPU ||
- !npe->phb->ioda.tce_inval_reg)
- continue;
-
- pnv_pci_ioda2_tce_invalidate_entire(npe->phb,
- rm);
- }
+ pnv_pci_ioda2_tce_invalidate_entire(pe->phb, rm);
+ continue;
+ }
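+ /* Otherwise only invalidate the TCEs covered by this update */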
+ pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
+ invalidate, tbl->it_page_shift,
+ index, npages);
}
}
#endif /* CONFIG_DEBUG_FS */
}
-static void pnv_npu_ioda_fixup(void)
-{
- bool enable_bypass;
- struct pci_controller *hose, *tmp;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
- unsigned int weight;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- phb = hose->private_data;
- if (phb->type != PNV_PHB_NPU)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- weight = pnv_pci_ioda_pe_dma_weight(pe);
- if (WARN_ON(!weight))
- continue;
-
- enable_bypass = dma_get_mask(&pe->pdev->dev) ==
- DMA_BIT_MASK(64);
- pnv_npu_init_dma_pe(pe);
- }
- }
-}
-
static void pnv_pci_ioda_fixup(void)
{
pnv_pci_ioda_setup_PEs();
eeh_init();
eeh_addr_cache_build();
#endif
-
- /* Link NPU IODA tables to their PCI devices. */
- pnv_npu_ioda_fixup();
}
/*
#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
-#define PNV_IODA_PE_PEER (1 << 6) /* PE has peers */
/* Data associated with a PE, including IOMMU tracking etc.. */
struct pnv_phb;
unsigned long flags;
struct pnv_phb *phb;
-#define PNV_IODA_MAX_PEER_PES 8
- struct pnv_ioda_pe *peers[PNV_IODA_MAX_PEER_PES];
-
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
* is populated, in the later case, pbus is.
pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
/* Nvlink functions */
-extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
-extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);