return table;
}
-static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *alloc_irq_table(u16 devid)
{
struct irq_remap_table *table = NULL;
struct amd_iommu *iommu;
@@ ... @@ static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
	/* Initialize table spin-lock */
raw_spin_lock_init(&table->lock);
- if (ioapic)
- /* Keep the first 32 indexes free for IOAPIC interrupts */
- table->min_index = 32;
-
table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
if (!table->table) {
kfree(table);
@@ ... @@ static struct irq_remap_table *alloc_irq_table(u16 devid, bool ioapic)
	memset(table->table, 0,
(MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
- if (ioapic) {
- int i;
-
- for (i = 0; i < 32; ++i)
- iommu->irte_ops->set_allocated(table, i);
- }
irq_lookup_table[devid] = table;
set_dte_irq_entry(devid, table);
@@ ... @@ static int alloc_irq_index(u16 devid, int count, bool align)
	if (!iommu)
return -ENODEV;
- table = alloc_irq_table(devid, false);
+ table = alloc_irq_table(devid);
if (!table)
return -ENODEV;
@@ ... @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
	if (ret < 0)
		return ret;
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
- if (alloc_irq_table(devid, true))
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+
+ table = alloc_irq_table(devid);
+ if (table) {
+ if (!table->min_index) {
+ /*
+ * Keep the first 32 indexes free for IOAPIC
+ * interrupts.
+ */
+ table->min_index = 32;
+ iommu = amd_iommu_rlookup_table[devid];
+ for (i = 0; i < 32; ++i)
+ iommu->irte_ops->set_allocated(table, i);
+ }
+ WARN_ON(table->min_index != 32);
index = info->ioapic_pin;
- else
+ } else {
ret = -ENOMEM;
+ }
} else {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
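
For readers outside the kernel tree, here is a minimal standalone sketch of the pattern the patch introduces: the allocator stays generic, and the IOAPIC caller lazily reserves the first 32 indexes on first use, guarded by an invariant check. Every name below (remap_table, table_alloc, reserve_ioapic_slots) is a hypothetical illustration, not kernel API; assert() stands in for WARN_ON().

/*
 * Standalone sketch (not kernel code) of the caller-side lazy
 * reservation this patch moves out of alloc_irq_table().
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IRQS_PER_TABLE	256
#define IOAPIC_RESERVED		32

struct remap_table {
	unsigned int min_index;		/* 0 until a caller reserves slots */
	bool allocated[MAX_IRQS_PER_TABLE];
};

/* Generic allocator: no IOAPIC knowledge, like alloc_irq_table() after
 * the patch. */
static struct remap_table *table_alloc(void)
{
	return calloc(1, sizeof(struct remap_table));
}

/* Caller-side special case, like the new irq_remapping_alloc() branch:
 * reserve indexes 0..31 once, then check the invariant. */
static void reserve_ioapic_slots(struct remap_table *t)
{
	if (!t->min_index) {
		for (unsigned int i = 0; i < IOAPIC_RESERVED; ++i)
			t->allocated[i] = true;
		t->min_index = IOAPIC_RESERVED;
	}
	/* Analogue of WARN_ON(table->min_index != 32) in the patch. */
	assert(t->min_index == IOAPIC_RESERVED);
}

int main(void)
{
	struct remap_table *t = table_alloc();

	if (!t)
		return 1;
	reserve_ioapic_slots(t);
	reserve_ioapic_slots(t);	/* second call is a no-op */
	printf("min_index = %u\n", t->min_index);
	free(t);
	return 0;
}

Because the reservation is idempotent and checked with an invariant, it no longer matters whether the IOAPIC path or a regular device path creates the table first, which is what lets the ioapic parameter disappear from the allocator.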