iommu_completion_wait(iommu);
}
-static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
-{
-	int ret;
-	u64 data;
-	unsigned long flags;
-	struct iommu_cmd cmd, cmd2;
-
-	if (iommu->irtcachedis_enabled)
-		return;
-
-	build_inv_irt(&cmd, devid);
-	data = atomic64_add_return(1, &iommu->cmd_sem_val);
-	build_completion_wait(&cmd2, iommu, data);
-
-	raw_spin_lock_irqsave(&iommu->lock, flags);
-	ret = __iommu_queue_command_sync(iommu, &cmd, true);
-	if (ret)
-		goto out;
-	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
-	if (ret)
-		goto out;
-	wait_on_sem(iommu, data);
-out:
-	raw_spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
if (iommu_feature(iommu, FEATURE_IA)) {
static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
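+/*
+ * Flush the IOMMU's cached copy of the interrupt remapping table for
+ * @devid: queue an INVALIDATE_INTERRUPT_TABLE command followed by a
+ * COMPLETION_WAIT and wait on its semaphore, so that a prior IRTE
+ * update is visible to the hardware. No-op when IRTE caching is
+ * disabled (irtcachedis_enabled).
+ */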
+static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
+{
+	int ret;
+	u64 data;
+	unsigned long flags;
+	struct iommu_cmd cmd, cmd2;
+
+	if (iommu->irtcachedis_enabled)
+		return;
+
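+	/* Pair the invalidation with a completion-wait on a fresh semaphore value */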
+	build_inv_irt(&cmd, devid);
+	data = atomic64_add_return(1, &iommu->cmd_sem_val);
+	build_completion_wait(&cmd2, iommu, data);
+
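+	/* Queue both commands under one lock hold so no other command lands between them */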
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+	ret = __iommu_queue_command_sync(iommu, &cmd, true);
+	if (ret)
+		goto out;
+	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+	if (ret)
+		goto out;
+	wait_on_sem(iommu, data);
+out:
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{