return RESUME_GUEST;
}
+/*
+ * If the lppaca had pmcregs_in_use clear when we exited the guest, then
+ * HFSCR_PM is cleared for next entry. If the guest then tries to access
+ * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM
+ * back in the guest HFSCR will cause the next entry to load the PMU SPRs and
+ * allow the guest access to continue.
+ */
+static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
+{
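+ /*
+ * If PM was never permitted for this guest, fail emulation so the
+ * exit handler reflects an illegal instruction program interrupt.
+ */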
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_PM;
+
+ return RESUME_GUEST;
+}
+
static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
* to emulate.
* Otherwise, we just generate a program interrupt to the guest.
*/
- case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
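+ /* The top byte of HFSCR logs which facility caused this interrupt */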
+ u64 cause = vcpu->arch.hfscr >> 56;
+
r = EMULATE_FAIL;
- if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
- cpu_has_feature(CPU_FTR_ARCH_300))
- r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (cause == FSCR_MSGP_LG)
+ r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (cause == FSCR_PM_LG)
+ r = kvmppc_pmu_unavailable(vcpu);
+ }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
+ }
case BOOK3S_INTERRUPT_HV_RM_HARD:
r = RESUME_PASSTHROUGH;
vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+ /*
+ * PM is demand-faulted so start with it clear.
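+ * The first guest access to a PMU SPR will fault it back in
+ * via the facility unavailable interrupt.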
+ */
+ vcpu->arch.hfscr &= ~HFSCR_PM;
+
kvmppc_mmu_book3s_hv_init(vcpu);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
static void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs)
{
+ struct lppaca *lp;
+ int load_pmu = 1;
+
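+ /*
+ * The guest advertises whether it is using the PMU via the
+ * pmcregs_in_use flag in its VPA. Without a registered VPA,
+ * assume the PMU is in use.
+ */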
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ load_pmu = lp->pmcregs_in_use;
+
+ /* Save host */
if (ppc_get_pmu_inuse()) {
/*
* It might be better to put PMU handling (at least for the
}
#ifdef CONFIG_PPC_PSERIES
+ /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
if (kvmhv_on_pseries()) {
barrier();
- if (vcpu->arch.vpa.pinned_addr) {
- struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
- get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
- } else {
- get_lppaca()->pmcregs_in_use = 1;
- }
+ get_lppaca()->pmcregs_in_use = load_pmu;
barrier();
}
#endif
- /* load guest */
- mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
- mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
- mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
- mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
- mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
- mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
- mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
- mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
- mtspr(SPRN_SDAR, vcpu->arch.sdar);
- mtspr(SPRN_SIAR, vcpu->arch.siar);
- mtspr(SPRN_SIER, vcpu->arch.sier[0]);
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
- mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
- mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
- }
- /* Set MMCRA then MMCR0 last */
- mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
- mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
- /* No isync necessary because we're starting counters */
+ /*
+ * Load guest. If the VPA said the PMCs are not in use but the guest
+ * tried to access them anyway, HFSCR[PM] will be set by the HFAC
+ * fault, so we can make forward progress.
+ */
+ if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
+ mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
+ mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
+ mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
+ mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
+ mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
+ mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
+ mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
+ mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
+ mtspr(SPRN_SDAR, vcpu->arch.sdar);
+ mtspr(SPRN_SIAR, vcpu->arch.siar);
+ mtspr(SPRN_SIER, vcpu->arch.sier[0]);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
+ mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
+ mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
+ }
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
+ mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
+ /* No isync necessary because we're starting counters */
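+
+ /*
+ * The guest's PMU state is now loaded, so give it direct PMU
+ * access (where permitted) rather than faulting again.
+ */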
+ if (!vcpu->arch.nested &&
+ (vcpu->arch.hfscr_permitted & HFSCR_PM))
+ vcpu->arch.hfscr |= HFSCR_PM;
+ }
}
static void switch_pmu_to_host(struct kvm_vcpu *vcpu,
vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
}
- } else {
+
+ } else if (vcpu->arch.hfscr & HFSCR_PM) {
+ /*
+ * The guest accessed PMC SPRs without specifying they should
+ * be preserved, or it cleared pmcregs_in_use after the last
+ * access. Just ensure they are frozen.
+ */
freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
- }
+
+ /*
+ * Demand-fault PMU register access in the guest.
+ *
+ * This is used to grab the guest's VPA pmcregs_in_use value
+ * and reflect it into the host's VPA in the case of a nested
+ * hypervisor.
+ *
+ * It also avoids having to zero-out SPRs after each guest
+ * exit to avoid side-channels, because a guest running with
+ * HFSCR_PM clear cannot read stale values left in the registers.
+ *
+ * This is cleared here when we exit the guest, so later HFSCR
+ * interrupt handling can add it back to run the guest with
+ * PM enabled next time.
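+ *
+ * Nested guests are skipped: their HFSCR is supplied and
+ * managed by the L1 hypervisor.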
+ */
+ if (!vcpu->arch.nested)
+ vcpu->arch.hfscr &= ~HFSCR_PM;
+ } /* otherwise the PMU should still be frozen */
#ifdef CONFIG_PPC_PSERIES
if (kvmhv_on_pseries()) {