void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
resource_size_t mmio_pa;
unsigned int msix_entry_mask;
- unsigned int irq_type;
unsigned int ports;
bool is_pci_enabled;
bool is_regions;
bool is_pci_using_dac;
struct aq_hw_caps_s aq_hw_caps;
- struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS];
};
struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
int err = 0;
unsigned int bar = 0U;
unsigned int port = 0U;
- unsigned int i = 0U;
err = pci_enable_device(self->pdev);
if (err < 0)
}
}
- for (i = 0; i < self->aq_hw_caps.msix_irqs; i++)
- self->msix_entry[i].entry = i;
-
/*enable interrupts */
-#if AQ_CFG_FORCE_LEGACY_INT
- self->irq_type = AQ_HW_IRQ_LEGACY;
-#else
- err = pci_enable_msix(self->pdev, self->msix_entry,
- self->aq_hw_caps.msix_irqs);
+#if !AQ_CFG_FORCE_LEGACY_INT
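+ /* Request the full MSI-X vector count; min == max makes this all-or-nothing */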
+ err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
+ self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
- if (err >= 0) {
- self->irq_type = AQ_HW_IRQ_MSIX;
- } else {
- err = pci_enable_msi(self->pdev);
-
- if (err >= 0) {
- self->irq_type = AQ_HW_IRQ_MSI;
- } else {
- self->irq_type = AQ_HW_IRQ_LEGACY;
- err = 0;
- }
+ if (err < 0) {
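+ /* MSI-X not available: fall back to a single MSI or legacy INTx vector */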
+ err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (err < 0)
+ goto err_exit;
}
#endif
int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
+ struct pci_dev *pdev = self->pdev;
int err = 0;
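+ /* MSI and MSI-X vectors are never shared, so IRQF_SHARED is only needed for legacy INTx */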
- switch (self->irq_type) {
- case AQ_HW_IRQ_MSIX:
- err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0,
+ if (pdev->msix_enabled || pdev->msi_enabled)
+ err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
name, aq_vec);
- break;
-
- case AQ_HW_IRQ_MSI:
- err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec);
- break;
-
- case AQ_HW_IRQ_LEGACY:
- err = request_irq(self->pdev->irq, aq_vec_isr_legacy,
+ else
+ err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
IRQF_SHARED, name, aq_vec);
- break;
-
- default:
- err = -EFAULT;
- break;
- }
if (err >= 0) {
self->msix_entry_mask |= (1 << i);
self->aq_vec[i] = aq_vec;
- if (self->irq_type == AQ_HW_IRQ_MSIX)
- irq_set_affinity_hint(self->msix_entry[i].vector,
+ if (pdev->msix_enabled)
+ irq_set_affinity_hint(pci_irq_vector(pdev, i),
affinity_mask);
}
void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
{
+ struct pci_dev *pdev = self->pdev;
unsigned int i = 0U;
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
continue;
- switch (self->irq_type) {
- case AQ_HW_IRQ_MSIX:
- irq_set_affinity_hint(self->msix_entry[i].vector, NULL);
- free_irq(self->msix_entry[i].vector, self->aq_vec[i]);
- break;
-
- case AQ_HW_IRQ_MSI:
- free_irq(self->pdev->irq, self->aq_vec[i]);
- break;
-
- case AQ_HW_IRQ_LEGACY:
- free_irq(self->pdev->irq, self->aq_vec[i]);
- break;
-
- default:
- break;
- }
-
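+ /* Clear the affinity hint before freeing so free_irq() does not warn about a stale hint */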
+ if (pdev->msix_enabled)
+ irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
self->msix_entry_mask &= ~(1U << i);
}
}
unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
{
- return self->irq_type;
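+ /* Derive the interrupt mode from the pci_dev flags set by pci_alloc_irq_vectors() */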
+ if (self->pdev->msix_enabled)
+ return AQ_HW_IRQ_MSIX;
+ if (self->pdev->msi_enabled)
+ return AQ_HW_IRQ_MSI;
+ return AQ_HW_IRQ_LEGACY;
}
void aq_pci_func_deinit(struct aq_pci_func_s *self)
goto err_exit;
aq_pci_func_free_irqs(self);
-
- switch (self->irq_type) {
- case AQ_HW_IRQ_MSI:
- pci_disable_msi(self->pdev);
- break;
-
- case AQ_HW_IRQ_MSIX:
- pci_disable_msix(self->pdev);
- break;
-
- case AQ_HW_IRQ_LEGACY:
- break;
-
- default:
- break;
- }
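+ /* pci_free_irq_vectors() handles MSI-X, MSI and legacy INTx uniformly */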
+ pci_free_irq_vectors(self->pdev);
if (self->is_regions)
pci_release_regions(self->pdev);