// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}
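
/*
 * Illustration (added, not part of the original source): while the
 * vCPU's EL1 sysregs are loaded on the CPU, SYSREGS_ON_CPU is set and
 * the two accessors above transparently use the in-CPU copy, falling
 * back to the in-memory shadow otherwise. A caller never needs to know
 * which copy is live:
 *
 *	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
 *	vcpu_write_sys_reg(vcpu, tcr, TCR_EL1);
 *
 * (A hypothetical read-modify-write; KVM does not do this here.)
 */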
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14
/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
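
/*
 * Worked example (added for illustration, not in the original source):
 * a CPU with 64-byte data cache lines reports CTR_EL0.DminLine = 4,
 * i.e. Log2(16 words). get_min_cache_line_size(false) then returns
 * 4 + 2 = 6 == Log2(64).
 */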
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
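
/*
 * Illustration (added, not in the original source): for a 64-byte line,
 * line_size == 6 and the fabricated value is
 * SYS_FIELD_PREP(CCSIDR_EL1, LineSize, 2), since the architected field
 * encodes Log2(words per line) - 2. All other fields being zero encodes
 * the advertised 1-set, 1-way geometry.
 */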
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}
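
/*
 * Note (added): the per-vCPU ccsidr array is allocated lazily, on the
 * first userspace write that actually changes a value; until then,
 * reads keep fabricating CCSIDR values from CTR_EL0, so no memory is
 * spent on vCPUs whose cache geometry is never overridden.
 */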
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
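
/*
 * Example (added for illustration): a sys_reg_desc with
 * .aarch32_map = AA32_HI describes an AArch32 register living in bits
 * [63:32] of its 64-bit counterpart, so get_access_mask() hands back
 * mask = GENMASK_ULL(63, 32) and shift = 32, and callers below extract
 * the 32-bit view as:
 *
 *	val = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
 */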
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
	if (p->regval & SYS_OSLAR_OSLK)
		oslsr |= SYS_OSLSR_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);

	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);

	return actlr;
}
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}
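
/*
 * Worked example (added): vcpu_id 37 = 0b100101 maps to Aff0 = 5 and
 * Aff1 = 2, i.e. MPIDR_EL1 = (1 << 31) | (2 << 8) | 5, keeping at most
 * 16 vCPUs per Aff0 group so that ICC_SGI1R_EL1's 16-bit TargetList can
 * address each of them directly.
 */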
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return 0;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return 0;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}
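
/*
 * Decode example (added): PMEVCNTRn_EL0 is encoded with the top bits of
 * n in CRm[1:0] and the bottom bits in Op2, so for PMEVCNTR10_EL0
 * (CRm = 9, Op2 = 2) the index recovers as ((9 & 3) << 3) | 2 = 10.
 */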
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(r)							\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in the limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in the limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}
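
/*
 * Example flow (added): if userspace writes an ID_AA64DFR0_EL1 value
 * whose PMUVer field exceeds the limit returned by reset(), the loop
 * above treats the field as FTR_LOWER_SAFE (via
 * kvm_arm64_ftr_safe_value()), so safe_val ends up being the limit,
 * safe_val != f_val, and the write is refused with -E2BIG.
 */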
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return IDREG(vcpu->kvm, reg_to_encoding(r));
}
/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
 */
static inline bool is_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}
/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	return val;
}
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	/* Limit debug to ARMv8.0 */
	val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
	val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	return set_id_reg(vcpu, rd, val);
}
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	return val;
}
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		IDREG(vcpu->kvm, id) = val;

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}
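
/*
 * Usage sketch (added, hypothetical userspace side): a VMM customises a
 * writable ID register before the first KVM_RUN, e.g. via
 * KVM_SET_ONE_REG on ID_AA64DFR0_EL1 with a lower PMUVer. After any
 * vCPU has run, kvm_vm_has_ran_once() makes the same ioctl fail with
 * -EBUSY unless the written value already matches the final one.
 */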
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}
/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, let the unified cache be L2 so that an
		 * instruction cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
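
/*
 * Worked example (added): on a host with CTR_EL0.IDC = 1 and DIC = 0,
 * the fabricated hierarchy is an L1 instruction cache plus a unified L2
 * marked as LoC (loc = 2); with IDC = DIC = 1 it collapses to a single
 * unified L1 (loc = 1) and no separate instruction cache.
 */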
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}
static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}
/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = elx2_visibility,		\
}
/*
 * Since the reset() callback and the field val are not otherwise used for
 * idregs, they are repurposed:
 * - reset() returns the KVM-sanitised register value, which is the same as
 *   the host kernel's sanitised value when KVM applies no extra sanitisation.
 * - val is used as a mask of the fields writable from userspace: only bits
 *   set to 1 are writable. This mask might become unnecessary once all ID
 *   registers are writable from userspace.
 */
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature AArch32 ID registers */
#define AA32_ID_SANITISED(name) {		\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}
static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility,
	  .reset = read_sanitised_id_dfr0_el1,
	  .val = ID_DFR0_EL1_PerfMon_MASK, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_reg,
	  .reset = read_sanitised_id_aa64pfr0_el1,
	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
	  .access = access_id_reg,
	  .get_user = get_id_reg,
	  .set_user = set_id_aa64dfr0_el1,
	  .reset = read_sanitised_id_aa64dfr0_el1,
	  .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr },
	{ SYS_DESC(SYS_ELR_EL1), access_elr },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },
	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr },
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),
2279 { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2280 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
2281 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
2282 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
2283 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HACR_EL2, access_rw, reset_val, 0),

	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_SP_EL1), access_sp_el1 },

	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },

	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),

	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),

	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_RMR_EL2), trap_undef },

	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),

	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),

	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
	EL12_REG(CPACR, access_rw, reset_val, 0),
	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(TCR, access_vm_reg, reset_val, 0),
	{ SYS_DESC(SYS_SPSR_EL12), access_spsr },
	{ SYS_DESC(SYS_ELR_EL12), access_elr },
	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
	EL12_REG(VBAR, access_rw, reset_val, 0),
	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
	EL12_REG(CNTKCTL, access_rw, reset_val, 0),

	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};
static const struct sys_reg_desc *first_idreg;

static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						      \
	/* DBGBVRn */							      \
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
	/* DBGWVRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
	/* DBGWCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							      \
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
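
/*
 * Worked example of the mapping above (illustration only): an AArch32
 * read of DBGBVR1 (Op1=0, CRn=0, CRm=1, Op2=4) is served from
 * DBGBVR1_EL1[31:0] via the AA32(LO) annotation, while a read of
 * DBGBXVR1 (Op1=0, CRn=1, CRm=1, Op2=1) returns DBGBVR1_EL1[63:32]
 * via AA32(HI). Both encodings share trap_bvr and the same index n.
 */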
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
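
/*
 * Worked expansion (illustration only): PMU_PMEVCNTR(13) encodes
 * n = 13 = 0b01101 as CRm = 0b1000 | n[4:3] = 0b1001 (c9) and
 * Op2 = n[2:0] = 0b101, i.e. the AArch32 PMEVCNTR13 encoding
 * p15, 0, c14, c9, 5. PMU_PMEVTYPER(13) only changes the CRm base
 * to 0b1100, yielding p15, 0, c14, c13, 5 for PMEVTYPER13.
 */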
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },

	/* CCSIDR2 */
	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },

	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
};
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip the trapped instruction (advance the PC) if the handler asks for it */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The trap descriptor table to search
 * @nr_global: The number of entries in @global
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}
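
	/*
	 * For example (illustration only): an MCRR with Rt holding
	 * 0x89abcdef and Rt2 holding 0x01234567 yields
	 * params.regval = 0x0123456789abcdef, Rt2 providing the
	 * upper 32 bits.
	 */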
	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * to them.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}
/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
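
/*
 * Worked example (illustration only): a VMRS read of MVFR1 is rerouted
 * by kvm_esr_cp10_id_to_sys64() to the AArch64 MVFR1_EL1 encoding
 * (Op0=3, Op1=0, CRn=0, CRm=3, Op2=1), so it is satisfied by the same
 * handler that serves an MRS from MVFR1_EL1.
 */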
/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architecturally valid,
 * as we treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}
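
/*
 * Worked example (illustration only): an AArch32 MRC p15, 0, Rt, c0, c2, 0
 * (ID_ISAR0) arrives here as Op1=0, CRn=0, CRm=2, Op2=0; appending Op0=3
 * produces the AArch64 ID_ISAR0_EL1 encoding (3, 0, 0, 2, 0), which
 * emulate_sys_reg() then resolves through the AArch64 table.
 */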
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: The decoded access parameters
 * @global: The trap descriptor table to search
 * @nr_global: The number of entries in @global
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
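
/*
 * Illustration: with Op0 == 3, (CRn & 0b1011) == 0b1011 matches exactly
 * CRn == 0b1011 (11) and CRn == 0b1111 (15), the two CRn values that
 * the architecture reserves for IMPLEMENTATION DEFINED registers.
 */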
/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}

	return false;
}
static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *idreg = first_idreg;
	u32 id = reg_to_encoding(idreg);
	struct kvm *kvm = vcpu->kvm;

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	lockdep_assert_held(&kvm->arch.config_lock);

	/* Initialize all idregs */
	while (is_id_reg(id)) {
		IDREG(kvm, id) = idreg->reset(vcpu, idreg);

		idreg++;
		id = reg_to_encoding(idreg);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	kvm_reset_id_regs(vcpu);

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (is_id_reg(reg_to_encoding(r)))
			continue;

		if (r->reset)
			r->reset(vcpu, r);
	}
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
#define FUNCTION_INVARIANT(reg)						\
	static u64 get_##reg(struct kvm_vcpu *v,			\
			     const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
		return ((struct sys_reg_desc *)r)->val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)
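
/*
 * Sketch of the expansion above (illustration only):
 * FUNCTION_INVARIANT(midr_el1) defines
 *
 *	static u64 get_midr_el1(struct kvm_vcpu *v,
 *				const struct sys_reg_desc *r)
 *	{
 *		((struct sys_reg_desc *)r)->val = read_sysreg(midr_el1);
 *		return ((struct sys_reg_desc *)r)->val;
 *	}
 *
 * which caches the host value in the descriptor and returns it.
 */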
static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return ((struct sys_reg_desc *)r)->val;
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
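
/*
 * Worked example (illustration only, assuming the usual uapi shift
 * values OP0=14, OP1=11, CRN=7, CRM=3, OP2=0): TPIDR_EL0 is encoded as
 * Op0=3, Op1=3, CRn=13, CRm=0, Op2=2, so the encoding-specific bits are
 * (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 = 0xde82, OR-ed with
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG.
 */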
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
int __init kvm_sys_reg_table_init(void)
{
	struct sys_reg_params params;
	bool valid = true;
	unsigned int i;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
	params = encoding_to_params(SYS_ID_PFR0_EL1);
	first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));