// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}

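/*
 * Worked example for get_min_cache_line_size(): a 64-byte cache line
 * is reported as DminLine = Log2(64 / 4) = 4, and the function returns
 * 4 + 2 = 6 = Log2(64).
 */
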
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

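/*
 * Example: on a host whose minimum D-cache line is 64 bytes,
 * get_min_cache_line_size(false) returns 6, so set_ccsidr() rejects
 * with -EINVAL any CCSIDR_EL1 value whose LineSize field encodes less
 * than Log2(64) - 4 = 2.
 */
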
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

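/*
 * Example: an AA32_HI register such as the AArch32 DBGBXVRn maps to
 * bits [63:32] of DBGBVRn_EL1 (mask = GENMASK_ULL(63, 32), shift = 32),
 * while an AA32_LO register maps to bits [31:0] with no shift.
 */
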
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
	if (p->regval & SYS_OSLAR_OSLK)
		oslsr |= SYS_OSLSR_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

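/*
 * MDSCR_EL1 and MDCCINT_EL1 are routed through trap_debug_regs() (see
 * sys_reg_descs[] below), so the first guest write to either register
 * sets DEBUG_DIRTY and switches to the save/restore regime described
 * above.
 */
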
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}

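/*
 * Example: vcpu_id 21 (0x15) yields Aff0 = 5 and Aff1 = 1, so the
 * guest reads MPIDR_EL1 = (1ULL << 31) | (1 << 8) | 5 (bit 31 is RES1).
 */
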
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

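/*
 * The PMEVCNTRn_EL0 encoding packs n into CRm[1:0]:Op2[2:0]. For
 * example, PMEVCNTR10_EL0 has CRm = 0b1001 and Op2 = 0b010, so
 * ((CRm & 3) << 3) | (Op2 & 7) = (1 << 3) | 2 = 10.
 */
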
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

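/*
 * For example, DBG_BCR_BVR_WCR_WVR_EL1(5) expands to the four
 * descriptors for DBGBVR5_EL1, DBGBCR5_EL1, DBGWVR5_EL1 and
 * DBGWCR5_EL1, all indexed by CRm = 5 in their trap handlers.
 */
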
#define PMU_SYS_REG(r)							\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

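/*
 * For example, PTRAUTH_KEY(APIA) expands to the descriptors for
 * APIAKEYLO_EL1 and APIAKEYHI_EL1, both hidden unless the vcpu has
 * pointer authentication enabled.
 */
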
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return vcpu->kvm->arch.dfr0_pmuver.imp;

	return vcpu->kvm->arch.dfr0_pmuver.unimp;
}

static u8 perfmon_to_pmuver(u8 perfmon)
{
	switch (perfmon) {
	case ID_DFR0_EL1_PerfMon_PMUv3:
		return ID_AA64DFR0_EL1_PMUVer_IMP;
	case ID_DFR0_EL1_PerfMon_IMPDEF:
		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return perfmon;
	}
}

static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

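/*
 * Note that the IMP_DEF encodings (0xf) and the "not implemented"
 * encodings (0) are identical in both fields, so the default cases
 * above make the conversion a no-op for those values.
 */
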
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		if (kvm_vgic_global_state.type == VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
		/* Set PMUver to the required version */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
				  vcpu_pmuver(vcpu));
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
		break;
	case SYS_ID_DFR0_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 csv2, csv3;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* Same thing for CSV3 */
	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
	if (csv3 > 1 ||
	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV[23], and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;
	vcpu->kvm->arch.pfr0_csv3 = csv3;

	return 0;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 pmuver, host_pmuver;
	bool valid_pmu;

	host_pmuver = kvm_arm_pmu_get_pmuver_limit();

	/*
	 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
	 * as it doesn't promise more than what the HW gives us. We
	 * allow an IMPDEF PMU though, only if no PMU is supported
	 * (KVM backward compatibility handling).
	 */
	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
	if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
		return -EINVAL;

	valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);

	/* Make sure view register and PMU support do match */
	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
		return -EINVAL;

	/* We can only differ with PMUver, and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
	if (val)
		return -EINVAL;

	if (valid_pmu)
		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
	else
		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;

	return 0;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon, host_perfmon;
	bool valid_pmu;

	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
	if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
	    (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
		return -EINVAL;

	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);

	/* Make sure view register and PMU support do match */
	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
		return -EINVAL;

	/* We can only differ with PerfMon, and anything else is an error */
	val ^= read_id_reg(vcpu, rd);
	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
	if (val)
		return -EINVAL;

	if (valid_pmu)
		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
	else
		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);

	return 0;
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	*val = read_id_reg(vcpu, rd);
	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd))
		return -EINVAL;

	return 0;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;
}

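/*
 * Example: on a host with both CTR_EL0.IDC and CTR_EL0.DIC set, the
 * fabricated CLIDR_EL1 describes a single unified L1 cache which is
 * also the LoC, with LoUU and LoUIS left at zero.
 */
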
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

/*
 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
 * handling traps. Given that, they are always hidden from userspace.
 */
static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	return REG_HIDDEN_USER;
}

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = elx2_visibility,		\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/* sys_reg_desc initialiser for known AArch32 cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
}

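/*
 * For example, ID_UNALLOCATED(4, 2) covers the encoding Op0=3, Op1=0,
 * CRn=0, CRm=4, Op2=2, one of the two unallocated slots between
 * ID_AA64PFR1_EL1 and ID_AA64ZFR0_EL1 in the table below.
 */
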
static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AA32_ID_SANITISED(ID_PFR0_EL1),
	AA32_ID_SANITISED(ID_PFR1_EL1),
	{ SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
	  .visibility = aa32_id_visibility, },
	ID_HIDDEN(ID_AFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR0_EL1),
	AA32_ID_SANITISED(ID_MMFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR2_EL1),
	AA32_ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	AA32_ID_SANITISED(ID_ISAR0_EL1),
	AA32_ID_SANITISED(ID_ISAR1_EL1),
	AA32_ID_SANITISED(ID_ISAR2_EL1),
	AA32_ID_SANITISED(ID_ISAR3_EL1),
	AA32_ID_SANITISED(ID_ISAR4_EL1),
	AA32_ID_SANITISED(ID_ISAR5_EL1),
	AA32_ID_SANITISED(ID_MMFR4_EL1),
	AA32_ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	AA32_ID_SANITISED(MVFR0_EL1),
	AA32_ID_SANITISED(MVFR1_EL1),
	AA32_ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AA32_ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	AA32_ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	{ SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
	{ SYS_DESC(SYS_ELR_EL1), access_elr},

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0 },

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
	  .set_user = set_clidr },
	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * (pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown,
	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT),
	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
	EL2_REG(HACR_EL2, access_rw, reset_val, 0),

	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_SP_EL1), access_sp_el1},

	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },

	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),

	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),

	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
	{ SYS_DESC(SYS_RMR_EL2), trap_undef },

	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),

	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),

	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
	EL12_REG(CPACR, access_rw, reset_val, 0),
	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(TCR, access_vm_reg, reset_val, 0),
	{ SYS_DESC(SYS_SPSR_EL12), access_spsr},
	{ SYS_DESC(SYS_ELR_EL12), access_elr},
	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
	EL12_REG(VBAR, access_rw, reset_val, 0),
	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
	EL12_REG(CNTKCTL, access_rw, reset_val, 0),

	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
};


static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
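
		/*
		 * Fabricate a DBGDIDR value from the sanitised 64bit debug
		 * features. The field placement below assumes the DDI0487
		 * DBGDIDR layout: WRPs[31:28], BRPs[27:24], CTX_CMPs[23:20],
		 * Version[19:16] (6 = Armv8 debug architecture), bit 15 RES1,
		 * and nSUHD_imp[14]/SE_imp[12] reflecting EL3 presence.
		 */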
		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
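
/*
 * Note: 64bit coprocessor accesses (mcrr/mrrc) only encode Op1 and CRm,
 * which is why the cp14_64/cp15_64 descriptors leave CRn and Op2 at
 * zero; kvm_handle_cp_64() below decodes the trap the same way.
 */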

#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
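
/*
 * Worked example of the encoding above: for PMEVCNTR13 (n = 13),
 * CRm = 0b1000 | (13 >> 3) = 0b1001 (c9) and Op2 = 13 & 0x7 = 5,
 * i.e. the AArch32 encoding p15, 0, c14, c9, 5.
 */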
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	/* CCSIDR2 */
	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
};

static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}

/*
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The table of registers to match the access against
 * @nr_global: The size of that table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);
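
	/*
	 * The ESR ISS for a 64bit coprocessor access encodes Opc1 in
	 * bits [19:16], Rt2 in [14:10], Rt in [9:5], CRm in [4:1] and
	 * the direction in bit [0] (0 = write, i.e. mcrr), which is
	 * what the extractions above and below rely on.
	 */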
	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * to an AArch32 register that correspond to the AArch64 equivalent.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}

/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       register ranges.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}

/*
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: Decoded access parameters
 * @global: The table of registers to match the access against
 * @nr_global: The size of that table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
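
/*
 * The CRn mask below matches CRn values 0b1011 and 0b1111 (11 and 15),
 * i.e. the encoding space the architecture reserves for IMPLEMENTATION
 * DEFINED registers when Op0 == 3.
 */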
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}

/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}

	return false;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
		if (sys_reg_descs[i].reset)
			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
}

/*
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (val != r->val)
		return -EINVAL;

	return 0;
}
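
/*
 * Demux register ids pack the register class and selector into the id
 * itself: e.g. the CCSIDR for CSSELR=1 is addressed (as a 32bit value)
 * with KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | 1, matching what write_demux_regids()
 * below advertises to userspace.
 */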
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}

int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden_user(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
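
/*
 * Example of the packing below (assuming the UAPI shift values, with
 * Op0 at bit 14, Op1 at 11, CRn at 7, CRm at 3 and Op2 at 0): SCTLR_EL1
 * (op0=3, op1=0, CRn=1, CRm=0, op2=0) yields the index
 * 0x603000000013c080.
 */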
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

int __init kvm_sys_reg_table_init(void)
{
	bool valid = true;
	unsigned int i;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);