#include "amd_iommu_types.h"
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
-extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
-extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+irqreturn_t amd_iommu_int_thread(int irq, void *data);
+irqreturn_t amd_iommu_int_handler(int irq, void *data);
+void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
+int amd_iommu_init_devices(void);
+void amd_iommu_uninit_devices(void);
+void amd_iommu_init_notifier(void);
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
		mode = PAGE_MODE_NONE;
	} else if (type == IOMMU_DOMAIN_UNMANAGED) {
		pgtable = AMD_IOMMU_V1;
+	} else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
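+		/* DMA API domains follow the system-wide default page table mode */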
+		pgtable = amd_iommu_pgtable;
+	} else {
+		return NULL;
	}
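+	/* Allocate the domain only after the requested type has been validated */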
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return NULL;
+
	switch (pgtable) {
	case AMD_IOMMU_V1:
		ret = protection_domain_init_v1(domain, mode);
int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
-	unsigned long flags;
-	struct amd_iommu *iommu;
-	struct irq_remap_table *table;
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
-	int devid = ir_data->irq_2_irte.devid;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
+	    !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

-	iommu = ir_data->iommu;
-	if (!iommu)
-		return -ENODEV;
-
-	table = get_irq_table(iommu, devid);
-	if (!table)
+	if (!ir_data->iommu)
		return -ENODEV;
-	raw_spin_lock_irqsave(&table->lock, flags);
-
-	if (ref->lo.fields_vapic.guest_mode) {
-		if (cpu >= 0) {
-			ref->lo.fields_vapic.destination =
-				APICID_TO_IRTE_DEST_LO(cpu);
-			ref->hi.fields.destination =
-				APICID_TO_IRTE_DEST_HI(cpu);
-		}
-		ref->lo.fields_vapic.is_run = is_run;
-		barrier();
+	if (cpu >= 0) {
+		entry->lo.fields_vapic.destination =
+					APICID_TO_IRTE_DEST_LO(cpu);
+		entry->hi.fields.destination =
+					APICID_TO_IRTE_DEST_HI(cpu);
	}
+	entry->lo.fields_vapic.is_run = is_run;

-	raw_spin_unlock_irqrestore(&table->lock, flags);
-
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
-	return 0;
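+	/*
+	 * modify_irte_ga() takes the IRT lock, updates the IRTE and flushes
+	 * it, so the open-coded locking and flushing above is no longer
+	 * needed here.
+	 */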
+	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+			      ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif