// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>
/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14
/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
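/*
 * Worked example (illustrative, not part of the original file): for a
 * 64-byte minimum line size, line_size is 6 and the fabricated value
 * carries CCSIDR_EL1.LineSize = 6 - 4 = 2, with all associativity and
 * set bits zero, i.e. a 1-set, 1-way cache.
 */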
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
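/*
 * Example (illustrative): an AArch32 register tagged AA32(HI), such as
 * DBGBXVRn, maps to bits [63:32] of its 64-bit counterpart, so callers
 * get mask = GENMASK_ULL(63, 32) and shift = 32, and
 * (val & mask) >> shift extracts the 32-bit AArch32 view.
 */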
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
	if (p->regval & SYS_OSLAR_OSLK)
		oslsr |= SYS_OSLSR_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}
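/*
 * Worked example (illustrative): with rd->val ==
 * SYS_OSLSR_OSLM_IMPLEMENTED, a userspace write of
 * (SYS_OSLSR_OSLM_IMPLEMENTED | SYS_OSLSR_OSLK) only flips OSLK and is
 * accepted, while a write that clears the OSLM field differs in a
 * non-OSLK bit and is rejected with -EINVAL.
 */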
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
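/*
 * Worked example (illustrative): vcpu_id 21 (0x15) yields Aff0 = 0x5,
 * Aff1 = 0x1 and Aff2 = 0x0, i.e. MPIDR_EL1 = (1ULL << 31) | 0x0105,
 * keeping each Aff0 group small enough for GICv3 ICC_SGIxR targeting.
 */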
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}
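/*
 * Example decode (illustrative): PMEVCNTR10_EL0 is encoded with
 * CRm = 0b1001 (9) and Op2 = 0b010 (2), so
 * idx = ((9 & 3) << 3) | (2 & 7) = 8 | 2 = 10.
 */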
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;
			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;
			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;
		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;
		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(r)							\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macros to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	  .visibility = ptrauth_visibility }

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return vcpu->kvm->arch.dfr0_pmuver.imp;

	return vcpu->kvm->arch.dfr0_pmuver.unimp;
}

static u8 perfmon_to_pmuver(u8 perfmon)
{
	switch (perfmon) {
	case ID_DFR0_EL1_PerfMon_PMUv3:
		return ID_AA64DFR0_EL1_PMUVer_IMP;
	case ID_DFR0_EL1_PerfMon_IMPDEF:
		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return perfmon;
	}
}

static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		if (kvm_vgic_global_state.type == VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
		/* Set PMUver to the required version */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
				  vcpu_pmuver(vcpu));
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
		break;
	case SYS_ID_DFR0_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 csv2, csv3;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* Same thing for CSV3 */
	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
	if (csv3 > 1 ||
	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV[23], and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;
	vcpu->kvm->arch.pfr0_csv3 = csv3;

	return 0;
}
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 pmuver, host_pmuver;
	bool valid_pmu;

	host_pmuver = kvm_arm_pmu_get_pmuver_limit();

	/*
	 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
	 * as it doesn't promise more than what the HW gives us. We
	 * still allow an IMPDEF PMU, but only if no PMU is supported
	 * (KVM backward compatibility handling).
	 */
	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
	if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
		return -EINVAL;

	valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);

	/* Make sure the ID register view and PMU support match */
	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
		return -EINVAL;

	/* We can only differ with PMUver, and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
	if (val)
		return -EINVAL;

	if (valid_pmu)
		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
	else
		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;

	return 0;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon, host_perfmon;
	bool valid_pmu;

	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
	if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
	    (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
		return -EINVAL;

	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);

	/* Make sure the ID register view and PMU support match */
	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
		return -EINVAL;

	/* We can only differ with PerfMon, and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
	if (val)
		return -EINVAL;

	if (valid_pmu)
		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
	else
		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);

	return 0;
}
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	*val = read_id_reg(vcpu, rd);
	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd))
		return -EINVAL;

	return 0;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);

	return true;
}
/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;
}
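/*
 * Worked example (illustrative): on a system with CTR_EL0.IDC and
 * CTR_EL0.DIC both set, loc = 1 and the fabricated hierarchy is a
 * single unified L1 cache marked as LoC, with LoUU/LoUIS left at 0.
 */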
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = elx2_visibility,		\
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/* sys_reg_desc initialiser for known AArch32 cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {		\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility		\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
}
static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * debug client.
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),
	/* AArch64 ID registers */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	{ SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
	{ SYS_DESC(SYS_ELR_EL1), access_elr},

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * (pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
2177 EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
2178 EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
2179 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
2180 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
2181 EL2_REG(HCR_EL2, access_rw, reset_val, 0),
2182 EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
2183 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
2184 EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
2185 EL2_REG(HACR_EL2, access_rw, reset_val, 0),
2187 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
2188 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
2189 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
2190 EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
2191 EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
2193 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
2194 EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
2195 EL2_REG(ELR_EL2, access_rw, reset_val, 0),
2196 { SYS_DESC(SYS_SP_EL1), access_sp_el1},
2198 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
2199 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
2200 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
2201 EL2_REG(ESR_EL2, access_rw, reset_val, 0),
2202 { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
2204 EL2_REG(FAR_EL2, access_rw, reset_val, 0),
2205 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
2207 EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
2208 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
2210 EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
2211 EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
2212 { SYS_DESC(SYS_RMR_EL2), trap_undef },
2214 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
2215 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
2217 EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
2218 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
2220 EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
2221 EL12_REG(CPACR, access_rw, reset_val, 0),
2222 EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
2223 EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
2224 EL12_REG(TCR, access_vm_reg, reset_val, 0),
2225 { SYS_DESC(SYS_SPSR_EL12), access_spsr},
2226 { SYS_DESC(SYS_ELR_EL12), access_elr},
2227 EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
2228 EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
2229 EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
2230 EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
2231 EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
2232 EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
2233 EL12_REG(VBAR, access_rw, reset_val, 0),
2234 EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
2235 EL12_REG(CNTKCTL, access_rw, reset_val, 0),
2237 EL2_REG(SP_EL2, NULL, reset_unknown, 0),
2238 };
2240 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
2241 struct sys_reg_params *p,
2242 const struct sys_reg_desc *r)
2243 {
2244 if (p->is_write) {
2245 return ignore_write(vcpu, p);
2246 } else {
2247 u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
2248 u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2249 u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
2251 p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
2252 (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
2253 (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
2254 | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
2255 return true;
2256 }
2257 }
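/*
 * A reading of the DBGDIDR value synthesised above (check against the ARM
 * ARM before relying on it): bits [31:28] WRPs, [27:24] BRPs and [23:20]
 * CTX_CMPs are copied from ID_AA64DFR0_EL1, [19:16] is the debug
 * architecture version (0b0110, ARMv8), bit 15 is RES1, and bit 14
 * (nSUHD_imp) and bit 12 (SE_imp) are both tied to the presence of EL3.
 */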
2259 /*
2260 * AArch32 debug register mappings
2261 *
2262 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
2263 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
2264 *
2265 * None of the other registers share their location, so treat them as
2266 * if they were 64bit.
2267 */
2268 #define DBG_BCR_BVR_WCR_WVR(n) \
2269 /* DBGBVRn */ \
2270 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
2271 /* DBGBCRn */ \
2272 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
2273 /* DBGWVRn */ \
2274 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
2275 /* DBGWCRn */ \
2276 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
2278 #define DBGBXVR(n) \
2279 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
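/*
 * For illustration, the entries the two macros above generate for
 * breakpoint 1 route both 32-bit views onto one 64-bit register:
 *
 *	{ AA32(LO), Op1( 0), CRn( 0), CRm( 1), Op2( 4), trap_bvr, NULL, 1 },
 *	{ AA32(HI), Op1( 0), CRn( 1), CRm( 1), Op2( 1), trap_bvr, NULL, 1 },
 *
 * so DBGBVR1 accesses hit DBGBVR1_EL1[31:0] and DBGBXVR1 accesses hit
 * DBGBVR1_EL1[63:32].
 */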
2281 /*
2282 * Trapped cp14 registers. We generally ignore most of the external
2283 * debug, on the principle that they don't really make sense to a
2284 * guest. Revisit this one day, in case this principle changes.
2285 */
2286 static const struct sys_reg_desc cp14_regs[] = {
2288 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
2290 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
2292 DBG_BCR_BVR_WCR_WVR(0),
2294 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
2295 DBG_BCR_BVR_WCR_WVR(1),
2297 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
2299 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
2300 DBG_BCR_BVR_WCR_WVR(2),
2301 /* DBGDTR[RT]Xint */
2302 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
2303 /* DBGDTR[RT]Xext */
2304 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
2305 DBG_BCR_BVR_WCR_WVR(3),
2306 DBG_BCR_BVR_WCR_WVR(4),
2307 DBG_BCR_BVR_WCR_WVR(5),
2309 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
2311 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
2312 DBG_BCR_BVR_WCR_WVR(6),
2314 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
2315 DBG_BCR_BVR_WCR_WVR(7),
2316 DBG_BCR_BVR_WCR_WVR(8),
2317 DBG_BCR_BVR_WCR_WVR(9),
2318 DBG_BCR_BVR_WCR_WVR(10),
2319 DBG_BCR_BVR_WCR_WVR(11),
2320 DBG_BCR_BVR_WCR_WVR(12),
2321 DBG_BCR_BVR_WCR_WVR(13),
2322 DBG_BCR_BVR_WCR_WVR(14),
2323 DBG_BCR_BVR_WCR_WVR(15),
2325 /* DBGDRAR (32bit) */
2326 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
2330 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
2333 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
2337 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
2340 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
2353 /* DBGDSAR (32bit) */
2354 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
2357 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
2359 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
2361 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
2363 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
2365 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
2367 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
2370 /* Trapped cp14 64bit registers */
2371 static const struct sys_reg_desc cp14_64_regs[] = {
2372 /* DBGDRAR (64bit) */
2373 { Op1( 0), CRm( 1), .access = trap_raz_wi },
2375 /* DBGDSAR (64bit) */
2376 { Op1( 0), CRm( 2), .access = trap_raz_wi },
2377 };
2379 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
2380 AA32(_map), \
2381 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
2382 .visibility = pmu_visibility
2384 /* Macro to expand the PMEVCNTRn register */
2385 #define PMU_PMEVCNTR(n) \
2386 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
2387 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
2388 .access = access_pmu_evcntr }
2390 /* Macro to expand the PMEVTYPERn register */
2391 #define PMU_PMEVTYPER(n) \
2392 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
2393 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
2394 .access = access_pmu_evtyper }
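/*
 * Worked example of the encoding arithmetic above: for n = 10,
 * (n >> 3) & 0x3 == 1 and n & 0x7 == 2, so PMU_PMEVCNTR(10) selects
 * CRm = 0b1001, Op2 = 0b010, and PMU_PMEVTYPER(10) selects CRm = 0b1101,
 * Op2 = 0b010, matching the architected PMEVCNTR10/PMEVTYPER10 encodings.
 */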
2395 /*
2396 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
2397 * depending on the way they are accessed (as a 32bit or a 64bit
2398 * register).
2399 */
2400 static const struct sys_reg_desc cp15_regs[] = {
2401 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
2402 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
2404 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
2406 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
2407 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2408 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
2410 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
2412 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
2413 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
2415 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
2416 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
2418 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
2420 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
2422 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
2424 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
2426 /*
2427 * DC{C,I,CI}SW operations:
2428 */
2429 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
2430 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
2431 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
2434 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
2435 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
2436 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
2437 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
2438 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
2439 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
2440 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
2441 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
2442 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
2443 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
2444 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
2445 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
2446 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
2447 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
2448 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
2449 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
2450 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
2452 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
2455 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
2457 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
2459 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
2461 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
2464 { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2466 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
2469 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
2470 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2537 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
2539 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2540 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2543 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
2545 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
2546 };
2548 static const struct sys_reg_desc cp15_64_regs[] = {
2549 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2550 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
2551 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
2552 { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
2553 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
2554 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
2555 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
2556 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
2557 { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
2558 };
2560 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
2561 bool is_32)
2562 {
2563 unsigned int i;
2565 for (i = 0; i < n; i++) {
2566 if (!is_32 && table[i].reg && !table[i].reset) {
2567 kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
2568 return false;
2569 }
2571 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2572 kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
2573 return false;
2574 }
2575 }
2577 return true;
2578 }
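/*
 * The ordering enforced above is the (Op0, Op1, CRn, CRm, Op2) tuple order
 * that cmp_sys_reg() implements and find_reg() relies on for its binary
 * search; e.g. an entry encoded (3, 0, 0, 1, 0) must precede one encoded
 * (3, 0, 0, 2, 0).
 */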
2580 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
2581 {
2582 kvm_inject_undefined(vcpu);
2583 return 1;
2584 }
2586 static void perform_access(struct kvm_vcpu *vcpu,
2587 struct sys_reg_params *params,
2588 const struct sys_reg_desc *r)
2589 {
2590 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2592 /* Check for regs disabled by runtime config */
2593 if (sysreg_hidden(vcpu, r)) {
2594 kvm_inject_undefined(vcpu);
2595 return;
2596 }
2598 /*
2599 * Not having an accessor means that we have configured a trap
2600 * that we don't know how to handle. This certainly qualifies
2601 * as a gross bug that should be fixed right away.
2602 */
2603 BUG_ON(!r->access);
2605 /* Skip instruction if instructed so */
2606 if (likely(r->access(vcpu, params, r)))
2607 kvm_incr_pc(vcpu);
2608 }
2610 /**
2611 * emulate_cp -- tries to match a sys_reg access in a handling table, and
2612 * call the corresponding trap handler.
2613 *
2614 * @params: pointer to the descriptor of the access
2615 * @table: array of trap descriptors
2616 * @num: size of the trap descriptor array
2617 *
2618 * Return true if the access has been handled, false if not.
2619 */
2620 static bool emulate_cp(struct kvm_vcpu *vcpu,
2621 struct sys_reg_params *params,
2622 const struct sys_reg_desc *table,
2623 size_t num)
2624 {
2625 const struct sys_reg_desc *r;
2627 if (!table || !num)
2628 return false; /* Not handled */
2630 r = find_reg(params, table, num);
2632 if (r) {
2633 perform_access(vcpu, params, r);
2634 return true;
2635 }
2637 /* Not handled */
2638 return false;
2639 }
2641 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
2642 struct sys_reg_params *params)
2643 {
2644 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
2645 int cp = -1;
2647 switch (esr_ec) {
2648 case ESR_ELx_EC_CP15_32:
2649 case ESR_ELx_EC_CP15_64:
2650 cp = 15;
2651 break;
2652 case ESR_ELx_EC_CP14_MR:
2653 case ESR_ELx_EC_CP14_64:
2654 cp = 14;
2655 break;
2656 default:
2657 WARN_ON(1);
2658 }
2660 print_sys_reg_msg(params,
2661 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2662 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2663 kvm_inject_undefined(vcpu);
2664 }
2666 /**
2667 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
2668 * @vcpu: The VCPU pointer
2669 * @global: CP register descriptor table to search, @nr_global entries long
2670 */
2671 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
2672 const struct sys_reg_desc *global,
2673 size_t nr_global)
2674 {
2675 struct sys_reg_params params;
2676 u64 esr = kvm_vcpu_get_esr(vcpu);
2677 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2678 int Rt2 = (esr >> 10) & 0x1f;
2680 params.CRm = (esr >> 1) & 0xf;
2681 params.is_write = ((esr & 1) == 0);
2683 params.Op0 = 0;
2684 params.Op1 = (esr >> 16) & 0xf;
2685 params.Op2 = 0;
2686 params.CRn = 0;
2688 /*
2689 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
2690 * backends between AArch32 and AArch64, we get away with it.
2691 */
2692 if (params.is_write) {
2693 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
2694 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
2695 }
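/*
 * Illustrative values: an MCRR with Rt = r2 holding 0x11223344 and Rt2 = r3
 * holding 0x55667788 yields params.regval = 0x5566778811223344, i.e. Rt
 * supplies bits [31:0] and Rt2 bits [63:32].
 */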
2697 /*
2698 * If the table contains a handler, handle the
2699 * potential register operation in the case of a read and return
2700 * with success.
2701 */
2702 if (emulate_cp(vcpu, &params, global, nr_global)) {
2703 /* Split up the value between registers for the read side */
2704 if (!params.is_write) {
2705 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
2706 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
2707 }
2709 return 1;
2710 }
2712 unhandled_cp_access(vcpu, &params);
2713 return 1;
2714 }
2716 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
2718 /*
2719 * The CP10 ID registers are architecturally mapped to AArch64 feature
2720 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
2721 * to them.
2722 */
2723 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
2724 {
2725 u8 reg_id = (esr >> 10) & 0xf;
2726 bool valid;
2728 params->is_write = ((esr & 1) == 0);
2729 params->Op0 = 3;
2730 params->Op1 = 0;
2731 params->CRn = 0;
2732 params->CRm = 3;
2734 /* CP10 ID registers are read-only */
2735 valid = !params->is_write;
2737 switch (reg_id) {
2738 /* MVFR0 */
2739 case 0b0111:
2740 params->Op2 = 0;
2741 break;
2742 /* MVFR1 */
2743 case 0b0110:
2744 params->Op2 = 1;
2745 break;
2746 /* MVFR2 */
2747 case 0b0101:
2748 params->Op2 = 2;
2749 break;
2750 default:
2751 valid = false;
2752 }
2754 if (valid)
2755 return true;
2757 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
2758 params->is_write ? "write" : "read", reg_id);
2759 return false;
2760 }
2762 /**
2763 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
2764 * VFP Register' from AArch32.
2765 * @vcpu: The vCPU pointer
2766 *
2767 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
2768 * Work out the correct AArch64 system register encoding and reroute to the
2769 * AArch64 system register emulation.
2770 */
2771 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
2772 {
2773 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2774 u64 esr = kvm_vcpu_get_esr(vcpu);
2775 struct sys_reg_params params;
2777 /* UNDEF on any unhandled register access */
2778 if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
2779 kvm_inject_undefined(vcpu);
2780 return 1;
2781 }
2783 if (emulate_sys_reg(vcpu, &params))
2784 vcpu_set_reg(vcpu, Rt, params.regval);
2786 return 1;
2787 }
2789 /**
2790 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
2791 * CRn=0, which corresponds to the AArch32 feature
2792 * registers.
2793 * @vcpu: the vCPU pointer
2794 * @params: the system register access parameters.
2796 * Our cp15 system register tables do not enumerate the AArch32 feature
2797 * registers. Conveniently, our AArch64 table does, and the AArch32 system
2798 * register encoding can be trivially remapped into the AArch64 for the feature
2799 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
2801 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
2802 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
2803 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
2804 * treat undefined registers in this range as RAZ.
2805 */
2806 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
2807 struct sys_reg_params *params)
2808 {
2809 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2811 /* Treat impossible writes to RO registers as UNDEFINED */
2812 if (params->is_write) {
2813 unhandled_cp_access(vcpu, params);
2814 return 1;
2815 }
2819 /*
2820 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
2821 * Avoid conflicting with future expansion of AArch64 feature registers
2822 * and simply treat them as RAZ here.
2823 */
2824 if (params->CRm > 3)
2825 params->regval = 0;
2826 else if (!emulate_sys_reg(vcpu, params))
2827 return 1;
2829 vcpu_set_reg(vcpu, Rt, params->regval);
2830 return 1;
2831 }
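/*
 * Example of the rerouting described above: a 32-bit MRC of ID_ISAR0
 * (Op1=0, CRn=0, CRm=2, Op2=0) is looked up with Op0=3 prepended, i.e. as
 * the AArch64 ID_ISAR0_EL1 encoding in sys_reg_descs.
 */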
2833 /**
2834 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
2835 * @vcpu: The VCPU pointer
2836 * @params: decoded access parameters, looked up in @global (@nr_global entries)
2837 */
2838 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2839 struct sys_reg_params *params,
2840 const struct sys_reg_desc *global,
2841 size_t nr_global)
2842 {
2843 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2845 params->regval = vcpu_get_reg(vcpu, Rt);
2847 if (emulate_cp(vcpu, params, global, nr_global)) {
2848 if (!params->is_write)
2849 vcpu_set_reg(vcpu, Rt, params->regval);
2850 return 1;
2851 }
2853 unhandled_cp_access(vcpu, params);
2854 return 1;
2855 }
2857 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
2858 {
2859 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
2860 }
2862 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
2863 {
2864 struct sys_reg_params params;
2866 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2868 /*
2869 * Certain AArch32 ID registers are handled by rerouting to the AArch64
2870 * system register table. Registers in the ID range where CRm=0 are
2871 * excluded from this scheme as they do not trivially map into AArch64
2872 * system register encodings.
2873 */
2874 if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
2875 return kvm_emulate_cp15_id_reg(vcpu, &params);
2877 return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
2878 }
2880 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
2881 {
2882 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
2883 }
2885 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
2886 {
2887 struct sys_reg_params params;
2889 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2891 return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
2892 }
2894 static bool is_imp_def_sys_reg(struct sys_reg_params *params)
2895 {
2896 // See ARM DDI 0487E.a, section D12.3.2
2897 return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
2898 }
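/*
 * Example: the CRn mask above matches CRn values 0b1011 and 0b1111 (11 and
 * 15), so an access such as S3_0_C15_C0_0 is treated as IMPLEMENTATION
 * DEFINED, while CRn = 0 (the ID register space) is not.
 */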
2900 /**
2901 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
2902 * @vcpu: The VCPU pointer
2903 * @params: Decoded system register parameters
2904 *
2905 * Return: true if the system register access was successful, false otherwise.
2906 */
2907 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
2908 struct sys_reg_params *params)
2909 {
2910 const struct sys_reg_desc *r;
2912 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
2914 if (likely(r)) {
2915 perform_access(vcpu, params, r);
2916 return true;
2917 }
2919 if (is_imp_def_sys_reg(params)) {
2920 kvm_inject_undefined(vcpu);
2921 } else {
2922 print_sys_reg_msg(params,
2923 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2924 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
2925 kvm_inject_undefined(vcpu);
2926 }
2928 return false;
2929 }
2930 /**
2931 * kvm_reset_sys_regs - sets system registers to reset value
2932 * @vcpu: The VCPU pointer
2933 *
2934 * This function finds the right table above and sets the registers on the
2935 * virtual CPU struct to their architecturally defined reset values.
2936 */
2937 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
2938 {
2939 unsigned long i;
2941 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
2942 if (sys_reg_descs[i].reset)
2943 sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
2944 }
2946 /**
2947 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
2948 * @vcpu: The VCPU pointer
2949 */
2950 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
2951 {
2952 struct sys_reg_params params;
2953 unsigned long esr = kvm_vcpu_get_esr(vcpu);
2954 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2956 trace_kvm_handle_sys_reg(esr);
2958 params = esr_sys64_to_params(esr);
2959 params.regval = vcpu_get_reg(vcpu, Rt);
2961 if (!emulate_sys_reg(vcpu, &params))
2962 return 1;
2964 if (!params.is_write)
2965 vcpu_set_reg(vcpu, Rt, params.regval);
2966 return 1;
2967 }
2969 /******************************************************************************
2970 * Userspace API
2971 *****************************************************************************/
2973 static bool index_to_params(u64 id, struct sys_reg_params *params)
2974 {
2975 switch (id & KVM_REG_SIZE_MASK) {
2976 case KVM_REG_SIZE_U64:
2977 /* Any unused index bits means it's not valid. */
2978 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2979 | KVM_REG_ARM_COPROC_MASK
2980 | KVM_REG_ARM64_SYSREG_OP0_MASK
2981 | KVM_REG_ARM64_SYSREG_OP1_MASK
2982 | KVM_REG_ARM64_SYSREG_CRN_MASK
2983 | KVM_REG_ARM64_SYSREG_CRM_MASK
2984 | KVM_REG_ARM64_SYSREG_OP2_MASK))
2985 return false;
2986 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2987 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2988 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2989 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2990 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2991 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2992 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2993 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2994 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2995 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
2996 return true;
2997 default:
2998 return false;
2999 }
3000 }
3002 const struct sys_reg_desc *get_reg_by_id(u64 id,
3003 const struct sys_reg_desc table[],
3004 unsigned int num)
3005 {
3006 struct sys_reg_params params;
3008 if (!index_to_params(id, &params))
3009 return NULL;
3011 return find_reg(&params, table, num);
3012 }
3014 /* Decode an index value, and find the sys_reg_desc entry. */
3015 static const struct sys_reg_desc *
3016 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
3017 const struct sys_reg_desc table[], unsigned int num)
3018 {
3020 const struct sys_reg_desc *r;
3022 /* We only do sys_reg for now. */
3023 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
3024 return NULL;
3026 r = get_reg_by_id(id, table, num);
3028 /* Not saved in the sys_reg array and not otherwise accessible? */
3029 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
3030 r = NULL;
3032 return r;
3033 }
3035 /*
3036 * These are the invariant sys_reg registers: we let the guest see the
3037 * host versions of these, so they're part of the guest state.
3038 *
3039 * A future CPU may provide a mechanism to present different values to
3040 * the guest, or a future kvm may trap them.
3041 */
3043 #define FUNCTION_INVARIANT(reg) \
3044 static void get_##reg(struct kvm_vcpu *v, \
3045 const struct sys_reg_desc *r) \
3046 { \
3047 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
3048 }
3050 FUNCTION_INVARIANT(midr_el1)
3051 FUNCTION_INVARIANT(revidr_el1)
3052 FUNCTION_INVARIANT(aidr_el1)
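/*
 * For reference, FUNCTION_INVARIANT(midr_el1) above expands to roughly:
 *
 *	static void get_midr_el1(struct kvm_vcpu *v,
 *				 const struct sys_reg_desc *r)
 *	{
 *		((struct sys_reg_desc *)r)->val = read_sysreg(midr_el1);
 *	}
 */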
3054 static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
3055 {
3056 ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
3057 }
3059 /* ->val is filled in by kvm_sys_reg_table_init() */
3060 static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
3061 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
3062 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
3063 { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
3064 { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
3065 };
3067 static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
3068 {
3069 const struct sys_reg_desc *r;
3071 r = get_reg_by_id(id, invariant_sys_regs,
3072 ARRAY_SIZE(invariant_sys_regs));
3073 if (!r)
3074 return -ENOENT;
3076 return put_user(r->val, uaddr);
3077 }
3079 static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
3080 {
3081 const struct sys_reg_desc *r;
3082 u64 val;
3084 r = get_reg_by_id(id, invariant_sys_regs,
3085 ARRAY_SIZE(invariant_sys_regs));
3086 if (!r)
3087 return -ENOENT;
3089 if (get_user(val, uaddr))
3090 return -EFAULT;
3092 /* This is what we mean by invariant: you can't change it. */
3093 if (r->val != val)
3094 return -EINVAL;
3096 return 0;
3097 }
3099 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
3100 {
3101 u32 val;
3102 u32 __user *uval = uaddr;
3104 /* Fail if we have unknown bits set. */
3105 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
3106 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
3107 return -ENOENT;
3109 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
3110 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
3111 if (KVM_REG_SIZE(id) != 4)
3112 return -ENOENT;
3113 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3114 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
3115 if (val >= CSSELR_MAX)
3116 return -ENOENT;
3118 return put_user(get_ccsidr(vcpu, val), uval);
3119 default:
3120 return -ENOENT;
3121 }
3122 }
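/*
 * The matching index layout (also produced by write_demux_regids() below):
 * the CCSIDR for CSSELR value n is addressed from userspace as
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 *	KVM_REG_ARM_DEMUX_ID_CCSIDR | n
 *
 * and is read/written as a 32-bit register via KVM_{GET,SET}_ONE_REG.
 */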
3124 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
3125 {
3126 u32 val, newval;
3127 u32 __user *uval = uaddr;
3129 /* Fail if we have unknown bits set. */
3130 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
3131 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
3132 return -ENOENT;
3134 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
3135 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
3136 if (KVM_REG_SIZE(id) != 4)
3137 return -ENOENT;
3138 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3139 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
3140 if (val >= CSSELR_MAX)
3141 return -ENOENT;
3143 if (get_user(newval, uval))
3144 return -EFAULT;
3146 return set_ccsidr(vcpu, val, newval);
3147 default:
3148 return -ENOENT;
3149 }
3150 }
3152 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3153 const struct sys_reg_desc table[], unsigned int num)
3154 {
3155 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3156 const struct sys_reg_desc *r;
3157 u64 val;
3158 int ret;
3160 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3161 if (!r || sysreg_hidden_user(vcpu, r))
3162 return -ENOENT;
3164 if (r->get_user) {
3165 ret = (r->get_user)(vcpu, r, &val);
3166 } else {
3167 val = __vcpu_sys_reg(vcpu, r->reg);
3168 ret = 0;
3169 }
3171 if (!ret)
3172 ret = put_user(val, uaddr);
3174 return ret;
3175 }
3177 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
3178 {
3179 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
3180 int err;
3182 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
3183 return demux_c15_get(vcpu, reg->id, uaddr);
3185 err = get_invariant_sys_reg(reg->id, uaddr);
3186 if (err != -ENOENT)
3187 return err;
3189 return kvm_sys_reg_get_user(vcpu, reg,
3190 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3191 }
3193 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3194 const struct sys_reg_desc table[], unsigned int num)
3195 {
3196 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3197 const struct sys_reg_desc *r;
3198 u64 val;
3199 int ret;
3201 if (get_user(val, uaddr))
3202 return -EFAULT;
3204 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3205 if (!r || sysreg_hidden_user(vcpu, r))
3206 return -ENOENT;
3208 if (sysreg_user_write_ignore(vcpu, r))
3209 return 0;
3211 if (r->set_user) {
3212 ret = (r->set_user)(vcpu, r, val);
3213 } else {
3214 __vcpu_sys_reg(vcpu, r->reg) = val;
3215 ret = 0;
3216 }
3218 return ret;
3219 }
3221 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
3222 {
3223 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
3224 int err;
3226 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
3227 return demux_c15_set(vcpu, reg->id, uaddr);
3229 err = set_invariant_sys_reg(reg->id, uaddr);
3230 if (err != -ENOENT)
3231 return err;
3233 return kvm_sys_reg_set_user(vcpu, reg,
3234 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3235 }
3237 static unsigned int num_demux_regs(void)
3238 {
3239 return CSSELR_MAX;
3240 }
3242 static int write_demux_regids(u64 __user *uindices)
3243 {
3244 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
3245 unsigned int i;
3247 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
3248 for (i = 0; i < CSSELR_MAX; i++) {
3249 if (put_user(val | i, uindices))
3250 return -EFAULT;
3251 uindices++;
3252 }
3253 return 0;
3254 }
3256 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
3257 {
3258 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
3259 KVM_REG_ARM64_SYSREG |
3260 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
3261 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
3262 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
3263 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
3264 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
3265 }
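/*
 * Userspace consumes these indices through the KVM_GET_ONE_REG/
 * KVM_SET_ONE_REG ioctls. A minimal sketch (error handling omitted;
 * "index" stands for a value produced by sys_reg_to_index()):
 *
 *	struct kvm_one_reg one_reg;
 *	__u64 val;
 *
 *	one_reg.id = index;
 *	one_reg.addr = (__u64)&val;
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */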
3267 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
3268 {
3269 if (!*uind)
3270 return true;
3272 if (put_user(sys_reg_to_index(reg), *uind))
3273 return false;
3275 (*uind)++;
3276 return true;
3277 }
3279 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
3280 const struct sys_reg_desc *rd,
3281 u64 __user **uind,
3282 unsigned int *total)
3283 {
3284 /*
3285 * Ignore registers we trap but don't save,
3286 * and for which no custom user accessor is provided.
3287 */
3288 if (!(rd->reg || rd->get_user))
3289 return 0;
3291 if (sysreg_hidden_user(vcpu, rd))
3292 return 0;
3294 if (!copy_reg_to_user(rd, uind))
3295 return -EFAULT;
3297 (*total)++;
3298 return 0;
3299 }
3301 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
3302 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
3303 {
3304 const struct sys_reg_desc *i2, *end2;
3305 unsigned int total = 0;
3306 int err;
3308 i2 = sys_reg_descs;
3309 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
3311 while (i2 != end2) {
3312 err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
3313 if (err)
3314 return err;
3315 }
3316 return total;
3317 }
3319 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
3320 {
3321 return ARRAY_SIZE(invariant_sys_regs)
3322 + num_demux_regs()
3323 + walk_sys_regs(vcpu, (u64 __user *)NULL);
3324 }
3326 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
3327 {
3328 unsigned int i;
3329 int err;
3331 /* Then give them all the invariant registers' indices. */
3332 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
3333 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
3334 return -EFAULT;
3335 uindices++;
3336 }
3338 err = walk_sys_regs(vcpu, uindices);
3339 if (err < 0)
3340 return err;
3341 uindices += err;
3343 return write_demux_regids(uindices);
3344 }
3346 int __init kvm_sys_reg_table_init(void)
3347 {
3348 bool valid = true;
3349 unsigned int i;
3351 /* Make sure tables are unique and in order. */
3352 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
3353 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
3354 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
3355 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
3356 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
3357 valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
3359 if (!valid)
3360 return -EINVAL;
3362 /* We abuse the reset function to overwrite the table itself. */
3363 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
3364 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);