/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
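
/*
 * The errata matchers below run per-CPU (SCOPE_LOCAL_CPU) with preemption
 * disabled, since they read this CPU's ID registers; hence the
 * WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()) checks throughout.
 */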

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	/* A matching REVIDR bit means the erratum is fixed in this revision */
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}
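
/*
 * With SCTLR_EL1.UCT cleared, EL0 reads of CTR_EL0 trap to the kernel,
 * which can then emulate a sanitised system-wide value on machines where
 * the cache type registers differ between CPUs.
 */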
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT to trap EL0 accesses to CTR_EL0 */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	/* Replicate the workaround sequence into each 0x80-byte vector */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
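
/*
 * Hardened vector slots are allocated once per distinct callback and then
 * shared: if another CPU has already installed this callback, reuse its
 * slot rather than consuming a new one. bp_lock serialises the scan.
 */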
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	/* Flush the link stack with a chain of 16 dummy calls */
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return -1;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return -1;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
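
/*
 * Speculative Store Bypass (Spectre-v4) mitigation state, driven by
 * firmware (SMCCC ARCH_WORKAROUND_2), the SSBS PSTATE bit where
 * available, and the "ssbd=" command-line option below.
 */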
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
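
/*
 * Alternatives callback: patch the single instruction at @updptr to be
 * an HVC or SMC, matching whichever SMCCC conduit the firmware
 * registered with PSCI.
 */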
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
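
/*
 * Probe firmware for ARCH_WORKAROUND_2 support and record the global
 * ssbd_state accordingly. Returns true if this CPU needs the mitigation.
 */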
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (this_cpu_has_cap(ARM64_SSBS)) {
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
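
/*
 * Example: ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) expands to a
 * local-CPU erratum capability matched by is_affected_midr_range() for
 * Cortex-A53 r0p0 through r0p4.
 */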

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * Generic helper for handling capabilities with multiple (match,enable) pairs
 * of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
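
/*
 * The errata table: each entry names a capability, how to match affected
 * CPUs, and (optionally) a cpu_enable hook applied on CPUs that match.
 */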
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
		/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
		/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
		/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
	{
	}
};
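
/*
 * sysfs hooks backing /sys/devices/system/cpu/vulnerabilities/spectre_v1
 * and .../spectre_v2.
 */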
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}