Merge tag 'kvm-x86-pmu-6.6-fixes' of https://github.com/kvm-x86/linux into HEAD
author Paolo Bonzini <pbonzini@redhat.com>
Sun, 15 Oct 2023 12:24:18 +0000 (08:24 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Sun, 15 Oct 2023 12:24:18 +0000 (08:24 -0400)
KVM x86/pmu fixes for 6.6:

 - Truncate writes to PMU counters to the counter's width to avoid spurious
   overflows when emulating counter events in software (see the first
   sketch below).

 - Set the LVTPC entry mask bit when handling a PMI (to match Intel-defined
   architectural behavior); see the second sketch below.

 - Treat KVM_REQ_PMI as a wake event instead of queueing host IRQ work to
   kick the guest out of emulated halt (see the third sketch below).
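
The following is a minimal illustrative sketch of the first fix's idea, not
the in-tree KVM code; the helper name and the explicit width parameter are
made up for this example.  The point is simply that a value written to an
emulated counter is masked down to the counter's architectural width, so
software-counted events cannot spuriously set bits above that width.

#include <stdint.h>

/*
 * Illustrative helper: truncate a value written to an emulated PMC to the
 * counter's programmed width (e.g. 48 bits for a typical general-purpose
 * counter) so emulated increments can't spuriously overflow past it.
 */
static inline uint64_t pmc_truncate_to_width(uint64_t val, unsigned int width)
{
        uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

        return val & mask;
}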
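
The second fix mirrors the architecturally defined behavior in which PMI
delivery sets the mask bit of the local APIC's LVTPC entry.  A hedged sketch
follows; APIC_LVTPC, APIC_LVT_MASKED and kvm_lapic_{get,set}_reg() are the
existing kernel definitions, while the helper name is hypothetical.

/* Illustrative only: mask the LVTPC entry when a PMI is delivered. */
static void kvm_apic_mask_lvtpc(struct kvm_lapic *apic)
{
        u32 lvtpc = kvm_lapic_get_reg(apic, APIC_LVTPC);

        kvm_lapic_set_reg(apic, APIC_LVTPC, lvtpc | APIC_LVT_MASKED);
}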
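
The third fix shows up in the kvm_vcpu_has_events() hunk of the diff below,
where a pending KVM_REQ_PMI now counts as a wake event.  The sketch here
shows the delivery side under that model; kvm_make_request() and
kvm_vcpu_kick() are the existing request/kick primitives, while the helper
name is made up for illustration.

/*
 * Illustrative only: raise an emulated PMI by setting KVM_REQ_PMI and
 * kicking the vCPU.  Because the request is treated as a wake event, a
 * vCPU blocked in emulated halt is woken without queueing host IRQ work.
 */
static void kvm_pmu_request_pmi(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_PMI, vcpu);
        kvm_vcpu_kick(vcpu);
}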

arch/x86/kvm/x86.c

diff --combined arch/x86/kvm/x86.c
@@@ -5382,37 -5382,26 +5382,37 @@@ static int kvm_vcpu_ioctl_x86_set_debug
        return 0;
  }
  
 -static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 -                                       struct kvm_xsave *guest_xsave)
 -{
 -      if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
 -              return;
 -
 -      fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
 -                                     guest_xsave->region,
 -                                     sizeof(guest_xsave->region),
 -                                     vcpu->arch.pkru);
 -}
  
  static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
                                          u8 *state, unsigned int size)
  {
 +      /*
 +       * Only copy state for features that are enabled for the guest.  The
 +       * state itself isn't problematic, but setting bits in the header for
 +       * features that are supported in *this* host but not exposed to the
 +       * guest can result in KVM_SET_XSAVE failing when live migrating to a
 +       * compatible host without the features that are NOT exposed to the
 +       * guest.
 +       *
 +       * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
 +       * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
 +       * supported by the host.
 +       */
 +      u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
 +                           XFEATURE_MASK_FPSSE;
 +
        if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
                return;
  
 -      fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
 -                                     state, size, vcpu->arch.pkru);
 +      fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
 +                                     supported_xcr0, vcpu->arch.pkru);
 +}
 +
 +static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 +                                       struct kvm_xsave *guest_xsave)
 +{
 +      return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
 +                                           sizeof(guest_xsave->region));
  }
  
  static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@@ -12854,6 -12843,9 +12854,9 @@@ static inline bool kvm_vcpu_has_events(
                return true;
  #endif
  
+       if (kvm_test_request(KVM_REQ_PMI, vcpu))
+               return true;
+
        if (kvm_arch_interrupt_allowed(vcpu) &&
            (kvm_cpu_has_interrupt(vcpu) ||
            kvm_guest_apic_has_interrupt(vcpu)))