iommu/arm-smmu-v3: Refactor arm_smmu_init_bypass_stes() to force bypass
author Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Wed, 15 Jun 2022 10:10:42 +0000 (11:10 +0100)
committer Joerg Roedel <jroedel@suse.de>
Wed, 6 Jul 2022 10:51:11 +0000 (12:51 +0200)
By default, the disable_bypass flag is set and any device without an
iommu domain gets an STE with CFG_ABORT installed during
arm_smmu_init_bypass_stes(). Introduce a "force" flag and move the
STE update logic into arm_smmu_init_bypass_stes() so that we can
force it to install CFG_BYPASS STEs for specific SIDs.

This will be useful in a follow-up patch to install bypass STEs for
IORT RMR SIDs.
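
For illustration only (hypothetical, not part of this patch), a
follow-up caller could force bypass for a set of SIDs roughly as
sketched below, assuming each SID's stream table entry has already
been allocated and reusing the driver's existing
arm_smmu_get_step_for_sid() lookup. The function and parameter names
are illustrative.

	/*
	 * Illustrative sketch, not part of this patch: force bypass STEs
	 * for a list of SIDs. Assumes the STEs for these SIDs are already
	 * allocated in the stream table.
	 */
	static void example_force_bypass_stes(struct arm_smmu_device *smmu,
					      const u32 *sids, int num_sids)
	{
		int i;

		for (i = 0; i < num_sids; i++) {
			/* Look up the STE for this stream ID */
			__le64 *step = arm_smmu_get_step_for_sid(smmu, sids[i]);

			/* force = true installs CFG_BYPASS even if disable_bypass is set */
			arm_smmu_init_bypass_stes(step, 1, true);
		}
	}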

Tested-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20220615101044.1972-8-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

index 17d4f3432df2ae3dd5ffcdaae207b4534fcdfb14..09723861a08a41a87ae738a3d18c84a84fc979d2 100644 (file)
@@ -1380,12 +1380,21 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
 {
        unsigned int i;
+       u64 val = STRTAB_STE_0_V;
+
+       if (disable_bypass && !force)
+               val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
+       else
+               val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
 
        for (i = 0; i < nent; ++i) {
-               arm_smmu_write_strtab_ent(NULL, -1, strtab);
+               strtab[0] = cpu_to_le64(val);
+               strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+                                                  STRTAB_STE_1_SHCFG_INCOMING));
+               strtab[2] = 0;
                strtab += STRTAB_STE_DWORDS;
        }
 }
@@ -1413,7 +1422,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
                return -ENOMEM;
        }
 
-       arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
+       arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
        arm_smmu_write_strtab_l1_desc(strtab, desc);
        return 0;
 }
@@ -3051,7 +3060,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
        reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
        cfg->strtab_base_cfg = reg;
 
-       arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
+       arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
        return 0;
 }