arm64: spectre: Consolidate spectre-v3a detection
author     Will Deacon <will@kernel.org>
           Fri, 13 Nov 2020 11:38:46 +0000
committer  Marc Zyngier <maz@kernel.org>
           Mon, 16 Nov 2020 10:43:06 +0000
The spectre-v3a mitigation is split between cpu_errata.c and proton-pack.c,
with the former detecting affected CPUs and the latter enabling the
workaround.

Move the detection logic alongside the enabling logic, like we do for the
other spectre mitigations.
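
For illustration, a minimal user-space sketch of the pattern this patch
converges on: a capability entry pairs a detection callback (.matches) with
an enable callback (.cpu_enable), and the framework invokes the latter
wherever the former reports a match. The struct and the loop below are
simplified stand-ins for the kernel's arm64_cpu_capabilities machinery, not
its actual definitions.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Simplified stand-in for the kernel's arm64_cpu_capabilities entry. */
  struct cpu_capability {
          const char *desc;
          bool (*matches)(void);     /* detection, run on each CPU */
          void (*cpu_enable)(void);  /* workaround, run where it matched */
  };

  static bool has_spectre_v3a(void)
  {
          /* The real hook checks MIDR_EL1 against an unsafe-CPU list. */
          return true; /* pretend this CPU is affected */
  }

  static void spectre_v3a_enable_mitigation(void)
  {
          printf("Spectre-v3a: mitigation enabled\n");
  }

  static const struct cpu_capability caps[] = {
          {
                  .desc       = "Spectre-v3a",
                  .matches    = has_spectre_v3a,
                  .cpu_enable = spectre_v3a_enable_mitigation,
          },
  };

  int main(void)
  {
          /* Roughly what the cpufeature framework does for each entry. */
          for (size_t i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
                  if (caps[i].matches())
                          caps[i].cpu_enable();
          }
          return 0;
  }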

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-10-will@kernel.org
arch/arm64/include/asm/spectre.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/proton-pack.c

diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
index b4df683..12a4eb5 100644
@@ -83,6 +83,7 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
 bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
+bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
 enum mitigation_state arm64_get_spectre_v4_state(void);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 949d561..0709c82 100644
@@ -196,16 +196,6 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
        return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#ifdef CONFIG_RANDOMIZE_BASE
-
-static const struct midr_range ca57_a72[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-       {},
-};
-
-#endif
-
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
 static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
@@ -462,7 +452,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        /* Must come after the Spectre-v2 entry */
                .desc = "Spectre-v3a",
                .capability = ARM64_SPECTRE_V3A,
-               ERRATA_MIDR_RANGE_LIST(ca57_a72),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = has_spectre_v3a,
                .cpu_enable = spectre_v3a_enable_mitigation,
        },
 #endif
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index cf9f8b8..d89be98 100644
@@ -277,6 +277,18 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
  * an indirect trampoline for the hyp vectors so that guests can't read
  * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
  */
+bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       static const struct midr_range spectre_v3a_unsafe_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+               {},
+       };
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+       return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+}
+
 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 {
        struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
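
For reference, a rough, self-contained sketch of the MIDR check that
has_spectre_v3a() delegates to is_midr_in_range_list(): the CPU's MIDR_EL1
implementer and part-number fields are compared against an allow-list
covering every variant and revision of Cortex-A57 and Cortex-A72. The
midr_match type and helpers below are illustrative assumptions, not kernel
code; the field offsets and part numbers are the standard Arm values.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  /* MIDR_EL1 fields: implementer in [31:24], part number in [15:4]. */
  #define MIDR_IMPLEMENTER(midr)   (((midr) >> 24) & 0xff)
  #define MIDR_PARTNUM(midr)       (((midr) >> 4) & 0xfff)

  #define ARM_CPU_IMP_ARM          0x41
  #define ARM_CPU_PART_CORTEX_A57  0xD07
  #define ARM_CPU_PART_CORTEX_A72  0xD08

  struct midr_match { uint8_t implementer; uint16_t partnum; };

  /* Like MIDR_ALL_VERSIONS(): any variant/revision of these parts. */
  static const struct midr_match spectre_v3a_unsafe_list[] = {
          { ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57 },
          { ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72 },
  };

  static bool midr_in_list(uint32_t midr)
  {
          size_t n = sizeof(spectre_v3a_unsafe_list) /
                     sizeof(spectre_v3a_unsafe_list[0]);

          for (size_t i = 0; i < n; i++) {
                  if (MIDR_IMPLEMENTER(midr) == spectre_v3a_unsafe_list[i].implementer &&
                      MIDR_PARTNUM(midr) == spectre_v3a_unsafe_list[i].partnum)
                          return true;
          }
          return false;
  }

  int main(void)
  {
          uint32_t midr = 0x410FD080; /* example value: Cortex-A72 r0p0 */
          printf("affected: %d\n", midr_in_list(midr));
          return 0;
  }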