Merge branch 'for-next/spectre-bhb' into for-next/core
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index ea78f0b..5777929 100644
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/bpf.h>
 #include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/nospec.h>
 #include <linux/prctl.h>
 #include <linux/sched/task_stack.h>
 
+#include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/spectre.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
 #include <asm/virt.h>
 
 /*
@@ -96,14 +99,51 @@ static bool spectre_v2_mitigations_off(void)
        return ret;
 }
 
+static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
+{
+       switch (bhb_state) {
+       case SPECTRE_UNAFFECTED:
+               return "";
+       default:
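+               /* Unknown states are reported the same as vulnerable. */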
+       case SPECTRE_VULNERABLE:
+               return ", but not BHB";
+       case SPECTRE_MITIGATED:
+               return ", BHB";
+       }
+}
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+       return !sysctl_unprivileged_bpf_disabled;
+#else
+       return false;
+#endif
+}
+
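+/*
+ * The sysfs string combines both states, e.g. "Mitigation: Branch predictor
+ * hardening, BHB" when both Spectre-v2 and Spectre-BHB are mitigated, or
+ * "Mitigation: CSV2, but not BHB" when only Spectre-v2 is covered.
+ */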
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
+       enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
+       const char *bhb_str = get_bhb_affected_string(bhb_state);
+       const char *v2_str = "Branch predictor hardening";
+
        switch (spectre_v2_state) {
        case SPECTRE_UNAFFECTED:
-               return sprintf(buf, "Not affected\n");
+               if (bhb_state == SPECTRE_UNAFFECTED)
+                       return sprintf(buf, "Not affected\n");
+
+               /*
+                * Platforms affected by Spectre-BHB can't report
+                * "Not affected" for Spectre-v2.
+                */
+               v2_str = "CSV2";
+               fallthrough;
        case SPECTRE_MITIGATED:
-               return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+               if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
+                       return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+               return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
        case SPECTRE_VULNERABLE:
                fallthrough;
        default:
@@ -557,9 +597,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
  * to call into firmware to adjust the mitigation state.
  */
-void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
-                                                  __le32 *origptr,
-                                                  __le32 *updptr, int nr_inst)
+void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
+                                              __le32 *origptr,
+                                              __le32 *updptr, int nr_inst)
 {
        u32 insn;
 
@@ -773,3 +813,344 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
                return -ENODEV;
        }
 }
+
+/*
+ * Spectre BHB.
+ *
+ * A CPU is either:
+ * - Mitigated by running a branchy loop a CPU-specific number of times, and
+ *   listed in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
+ * - Has the ClearBHB instruction to perform the mitigation.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ *   software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+ */
+static enum mitigation_state spectre_bhb_state;
+
+enum mitigation_state arm64_get_spectre_bhb_state(void)
+{
+       return spectre_bhb_state;
+}
+
+enum bhb_mitigation_bits {
+       BHB_LOOP,
+       BHB_FW,
+       BHB_HW,
+       BHB_INSN,
+};
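+/* Mitigations in use on any CPU, consulted when patching the vectors. */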
+static unsigned long system_bhb_mitigations;
+
+/*
+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
+ * SCOPE_SYSTEM call will give the right answer. Returns 'k', the number of
+ * loop iterations needed to flush the branch history on this CPU.
+ */
+u8 spectre_bhb_loop_affected(int scope)
+{
+       u8 k = 0;
+       static u8 max_bhb_k;
+
+       if (scope == SCOPE_LOCAL_CPU) {
+               static const struct midr_range spectre_bhb_k32_list[] = {
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+                       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+                       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+                       {},
+               };
+               static const struct midr_range spectre_bhb_k24_list[] = {
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+                       MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+                       {},
+               };
+               static const struct midr_range spectre_bhb_k8_list[] = {
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+                       {},
+               };
+
+               if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+                       k = 32;
+               else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+                       k = 24;
+               else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+                       k =  8;
+
+               max_bhb_k = max(max_bhb_k, k);
+       } else {
+               k = max_bhb_k;
+       }
+
+       return k;
+}
+
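+/*
+ * Probe whether firmware implements ARM_SMCCC_ARCH_WORKAROUND_3, using the
+ * SMCCC ARCH_FEATURES query.
+ */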
+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+{
+       int ret;
+       struct arm_smccc_res res;
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+
+       ret = res.a0;
+       switch (ret) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               return SPECTRE_UNAFFECTED;
+       default:
+               fallthrough;
+       case SMCCC_RET_NOT_SUPPORTED:
+               return SPECTRE_VULNERABLE;
+       }
+}
+
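+/*
+ * A CPU needs the firmware mitigation if it appears in the MIDR list below,
+ * or if firmware reports that the ARCH_WORKAROUND_3 call is implemented.
+ */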
+static bool is_spectre_bhb_fw_affected(int scope)
+{
+       static bool system_affected;
+       enum mitigation_state fw_state;
+       bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
+       static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+               {},
+       };
+       bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+                                        spectre_bhb_firmware_mitigated_list);
+
+       if (scope != SCOPE_LOCAL_CPU)
+               return system_affected;
+
+       fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+       if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+               system_affected = true;
+               return true;
+       }
+
+       return false;
+}
+
+static bool supports_ecbhb(int scope)
+{
+       u64 mmfr1;
+
+       if (scope == SCOPE_LOCAL_CPU)
+               mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+       else
+               mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+       return cpuid_feature_extract_unsigned_field(mmfr1,
+                                                   ID_AA64MMFR1_ECBHB_SHIFT);
+}
+
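+/*
+ * CPUs with CSV2.3 are unaffected. Anything needing ClearBHB, the branchy
+ * loop or the firmware call is affected, but can be mitigated.
+ */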
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+                            int scope)
+{
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       if (supports_csv2p3(scope))
+               return false;
+
+       if (supports_clearbhb(scope))
+               return true;
+
+       if (spectre_bhb_loop_affected(scope))
+               return true;
+
+       if (is_spectre_bhb_fw_affected(scope))
+               return true;
+
+       return false;
+}
+
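+/*
+ * A negative slot means the BHB vectors are compiled out
+ * (CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=n defines EL1_VECTOR_BHB_* as -1).
+ */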
+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+{
+       const char *v = arm64_get_bp_hardening_vector(slot);
+
+       if (slot < 0)
+               return;
+
+       __this_cpu_write(this_cpu_vector, v);
+
+       /*
+        * When KPTI is in use, the vectors are switched when exiting to
+        * user-space.
+        */
+       if (arm64_kernel_unmapped_at_el0())
+               return;
+
+       write_sysreg(v, vbar_el1);
+       isb();
+}
+
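+/*
+ * Mitigations are chosen in order of preference: the ECBHB feature (no
+ * vector change needed), the ClearBHB instruction, the branchy loop, then
+ * the firmware WA3 call.
+ */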
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+{
+       bp_hardening_cb_t cpu_cb;
+       enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+       struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
+
+       if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+               return;
+
+       if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
+               /* No point mitigating Spectre-BHB alone. */
+       } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+               pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
+       } else if (cpu_mitigations_off()) {
+               pr_info_once("spectre-bhb mitigation disabled by command line option\n");
+       } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+               state = SPECTRE_MITIGATED;
+               set_bit(BHB_HW, &system_bhb_mitigations);
+       } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
+               /*
+                * Ensure KVM uses the indirect vector which will have ClearBHB
+                * added.
+                */
+               if (!data->slot)
+                       data->slot = HYP_VECTOR_INDIRECT;
+
+               this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
+               state = SPECTRE_MITIGATED;
+               set_bit(BHB_INSN, &system_bhb_mitigations);
+       } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+               /*
+                * Ensure KVM uses the indirect vector which will have the
+                * branchy-loop added. A57/A72-r0 will already have selected
+                * the spectre-indirect vector, which is sufficient for BHB
+                * too.
+                */
+               if (!data->slot)
+                       data->slot = HYP_VECTOR_INDIRECT;
+
+               this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+               state = SPECTRE_MITIGATED;
+               set_bit(BHB_LOOP, &system_bhb_mitigations);
+       } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+               fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+               if (fw_state == SPECTRE_MITIGATED) {
+                       /*
+                        * Ensure KVM uses one of the spectre bp_hardening
+                        * vectors. The indirect vector doesn't include the EL3
+                        * call, so needs upgrading to
+                        * HYP_VECTOR_SPECTRE_INDIRECT.
+                        */
+                       if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+                               data->slot += 1;
+
+                       this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+                       /*
+                        * The WA3 call in the vectors supersedes the WA1 call
+                        * made during context-switch. Uninstall any firmware
+                        * bp_hardening callback.
+                        */
+                       cpu_cb = spectre_v2_get_sw_mitigation_cb();
+                       if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+                               __this_cpu_write(bp_hardening_data.fn, NULL);
+
+                       state = SPECTRE_MITIGATED;
+                       set_bit(BHB_FW, &system_bhb_mitigations);
+               }
+       }
+
+       update_mitigation_state(&spectre_bhb_state, state);
+}
+
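+/*
+ * The vector entries are built with every mitigation sequence present; the
+ * callbacks below patch each entry at boot so that only the sequences this
+ * system needs are executed.
+ */
+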
+/* Patched to NOP when the loop mitigation is enabled */
+void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
+                                                      __le32 *origptr,
+                                                      __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 1);
+
+       if (test_bit(BHB_LOOP, &system_bhb_mitigations))
+               *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/* Patched to NOP when the firmware mitigation is enabled */
+void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
+                                                     __le32 *origptr,
+                                                     __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 1);
+
+       if (test_bit(BHB_FW, &system_bhb_mitigations))
+               *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/* Patched to correct the immediate: e.g. MOV Xn, #24 on Cortex-A76 */
+void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+                                         __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       u8 rd;
+       u32 insn;
+       u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+
+       BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+       if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
+               return;
+
+       insn = le32_to_cpu(*origptr);
+       rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+       insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+                                        AARCH64_INSN_VARIANT_64BIT,
+                                        AARCH64_INSN_MOVEWIDE_ZERO);
+       *updptr++ = cpu_to_le32(insn);
+}
+
+/* Patched to mov WA3 when supported */
+void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
+                                  __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       u8 rd;
+       u32 insn;
+
+       BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+       if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+           !test_bit(BHB_FW, &system_bhb_mitigations))
+               return;
+
+       insn = le32_to_cpu(*origptr);
+       rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+
+       insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
+                                                 AARCH64_INSN_VARIANT_32BIT,
+                                                 AARCH64_INSN_REG_ZR, rd,
+                                                 ARM_SMCCC_ARCH_WORKAROUND_3);
+       if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
+               return;
+
+       *updptr++ = cpu_to_le32(insn);
+}
+
+/* Patched to NOP when not supported */
+void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
+                                       __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 2);
+
+       if (test_bit(BHB_INSN, &system_bhb_mitigations))
+               return;
+
+       *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+       *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
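+/*
+ * Unprivileged eBPF allows the branch history to be trained from within the
+ * kernel itself, bypassing the mitigation in the vectors, so warn when it
+ * is enabled on a system that relies on the BHB mitigation.
+ */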
+#ifdef CONFIG_BPF_SYSCALL
+#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
+void unpriv_ebpf_notify(int new_state)
+{
+       if (spectre_v2_state == SPECTRE_VULNERABLE ||
+           spectre_bhb_state != SPECTRE_MITIGATED)
+               return;
+
+       if (!new_state)
+               pr_err("WARNING: %s", EBPF_WARN);
+}
+#endif