 			goto out_clear_smmu;
 	}
 
-	if (smmu_domain->non_strict)
-		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-
 	if (smmu_domain->pgtbl_cfg.quirks)
 		pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks;
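Note: with the flush-queue attribute now stored directly in smmu_domain->pgtbl_cfg.quirks (see the set_attr hunk further down), the dedicated non_strict test above becomes redundant; the existing quirk merge already carries the bit into the io-pgtable config. A minimal userspace sketch of that folding, using illustrative names rather than the kernel types:

#include <assert.h>

#define QUIRK_NON_STRICT (1UL << 0)

struct dom_cfg  { unsigned long quirks; };	/* per-domain quirks, set via attributes */
struct inst_cfg { unsigned long quirks; };	/* config assembled at context-init time */

int main(void)
{
	struct dom_cfg dom = { .quirks = QUIRK_NON_STRICT };
	struct inst_cfg cfg = { 0 };

	/* One merge suffices: the non-strict bit rides along with
	 * any other per-domain quirks. */
	if (dom.quirks)
		cfg.quirks |= dom.quirks;

	assert(cfg.quirks & QUIRK_NON_STRICT);
	return 0;
}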
 		break;
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = smmu_domain->non_strict;
+		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
+			bool non_strict = smmu_domain->pgtbl_cfg.quirks &
+					  IO_PGTABLE_QUIRK_NON_STRICT;
+			*(int *)data = non_strict;
 			return 0;
+		}
 		default:
 			return -ENODEV;
 		}
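The block scope and bool local in the get_attr path are not cosmetic: converting the masked value to _Bool normalizes it to 0 or 1 before it is stored through the int pointer, matching the semantics of the old bool field, whereas storing the masked quirks value directly would hand callers the raw bit value. A standalone sketch of the difference (the bit position here is chosen for illustration, not taken from the kernel headers):

#include <stdbool.h>
#include <stdio.h>

#define QUIRK_NON_STRICT (1UL << 3)	/* illustrative bit position */

int main(void)
{
	unsigned long quirks = QUIRK_NON_STRICT;

	bool non_strict = quirks & QUIRK_NON_STRICT;	/* _Bool conversion yields 0 or 1 */
	int raw = quirks & QUIRK_NON_STRICT;		/* raw masked value: 8 here */

	printf("raw=%d, as bool=%d\n", raw, (int)non_strict);	/* prints raw=8, as bool=1 */
	return 0;
}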
 	case IOMMU_DOMAIN_DMA:
 		switch (attr) {
 		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			smmu_domain->non_strict = *(int *)data;
+			if (*(int *)data)
+				smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+			else
+				smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
 			break;
 		default:
 			ret = -ENODEV;
 	const struct iommu_flush_ops	*flush_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
-	bool				non_strict;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
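Taken together, the change drops the cached non_strict field from struct arm_smmu_domain and derives the state on demand from the quirks bitmask instead, so the flag has a single source of truth. A compact userspace model of the resulting get/set pattern; the helper names are hypothetical, not the kernel API:

#include <assert.h>
#include <stdbool.h>

#define QUIRK_NON_STRICT (1UL << 0)

struct smmu_dom { unsigned long quirks; };	/* no separate bool field */

static void dom_set_non_strict(struct smmu_dom *d, int enable)
{
	if (enable)
		d->quirks |= QUIRK_NON_STRICT;
	else
		d->quirks &= ~QUIRK_NON_STRICT;
}

static bool dom_get_non_strict(const struct smmu_dom *d)
{
	return d->quirks & QUIRK_NON_STRICT;	/* derived, never cached */
}

int main(void)
{
	struct smmu_dom d = { 0 };

	dom_set_non_strict(&d, 1);
	assert(dom_get_non_strict(&d));
	dom_set_non_strict(&d, 0);
	assert(!dom_get_non_strict(&d));
	return 0;
}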