/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	/* Keep only the implementer, architecture and top nibble of the part number */
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
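
/*
 * Note (cross-referencing the trap handling elsewhere in the kernel, not
 * this file): when a mismatch is detected, cpu_enable_trap_ctr_access()
 * below clears SCTLR_EL1.UCT, so EL0 reads of CTR_EL0 trap to EL1 and
 * the kernel can hand userspace the sanitised system-wide value instead
 * of this CPU's own register.
 */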
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
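
/*
 * Layout note: each EL2 hardening slot spans SZ_2K, i.e. sixteen
 * exception vector entries of 0x80 bytes each. The copy loop below
 * places the SMCCC workaround sequence at the head of every entry in
 * the slot.
 */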
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
	 * start/end if we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
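
/*
 * Falkor's return-address (link) stack can mis-steer speculation. The
 * routine below saves x30, executes sixteen nested branch-and-link
 * instructions purely to overwrite every link-stack entry with a
 * harmless target, then restores x30.
 */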
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
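
/*
 * Usage example: pass "ssbd=force-on", "ssbd=force-off" or "ssbd=kernel"
 * on the kernel command line. "kernel" is the default (ssbd_state is
 * initialised to ARM64_SSBD_KERNEL above) and leaves the decision to the
 * dynamic, per-CPU mitigation logic.
 */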
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
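
/*
 * The companion callback below is wired up from the EL1 entry assembly:
 * the instruction at the patched site defaults to a branch that skips
 * the ARCH_WORKAROUND_2 handling, and replacing it with a NOP lets the
 * dynamic mitigation code run.
 */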
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
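
/*
 * PSTATE.SSBS semantics (architectural): SSBS == 0 forbids speculative
 * store bypass (mitigation on), SSBS == 1 permits it (mitigation off).
 * Hence SET_PSTATE_SSBS(0) below when enabling the mitigation.
 */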
void arm64_set_ssbd_mitigation(bool state)
{
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (this_cpu_has_cap(ARM64_SSBS)) {
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
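
/*
 * With SCTLR_EL1.UCI cleared above, EL0 cache maintenance instructions
 * (e.g. DC CVAU) trap to EL1, where the fault handler can carry them
 * out on the task's behalf using operations that are safe on the
 * affected parts.
 */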
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list
/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)
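
/*
 * Expansion example (illustrative): for a Cortex-A53 r0p0..r0p4 erratum,
 *
 *	ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4)
 *
 * expands to
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4)
 */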
/*
 * Generic helper for handling capabilities with multiple (match,enable) pairs
 * of call backs, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
	{},
};

#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
		/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
		/* Cortex-A53 r0p[01] */
		.desc = "ARM erratum 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 parts with REVIDR bit 8 set have the erratum fixed */
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
		/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
	}
};