Merge remote-tracking branch 'kvmarm/misc-5.5' into kvmarm/next
author	Marc Zyngier <maz@kernel.org>
Fri, 8 Nov 2019 11:27:29 +0000 (11:27 +0000)
committer	Marc Zyngier <maz@kernel.org>
Fri, 8 Nov 2019 11:27:29 +0000 (11:27 +0000)
arch/arm/include/asm/kvm_emulate.h
arch/arm/kvm/guest.c
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/guest.c
virt/kvm/arm/arm.c

diff --combined arch/arm/include/asm/kvm_emulate.h
@@@ -95,12 -95,12 +95,12 @@@ static inline unsigned long *vcpu_hcr(c
        return (unsigned long *)&vcpu->arch.hcr;
  }
  
- static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+ static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
  {
        vcpu->arch.hcr &= ~HCR_TWE;
  }
  
- static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
  {
        vcpu->arch.hcr |= HCR_TWE;
  }
@@@ -167,11 -167,6 +167,11 @@@ static inline bool kvm_vcpu_dabt_isvali
        return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
  }
  
 +static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 +{
 +      return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
 +}
 +
  static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
  {
        return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
diff --combined arch/arm/kvm/guest.c
  #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
  
  struct kvm_stats_debugfs_item debugfs_entries[] = {
+       VCPU_STAT(halt_successful_poll),
+       VCPU_STAT(halt_attempted_poll),
+       VCPU_STAT(halt_poll_invalid),
+       VCPU_STAT(halt_wakeup),
        VCPU_STAT(hvc_exit_stat),
        VCPU_STAT(wfe_exit_stat),
        VCPU_STAT(wfi_exit_stat),
@@@ -255,12 -259,6 +259,12 @@@ int __kvm_arm_vcpu_get_events(struct kv
  {
        events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
  
 +      /*
 +       * We never return a pending ext_dabt here because we deliver it to
 +       * the virtual CPU directly when setting the event and it's no longer
 +       * 'pending' at this point.
 +       */
 +
        return 0;
  }
  
@@@ -269,16 -267,12 +273,16 @@@ int __kvm_arm_vcpu_set_events(struct kv
  {
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;
 +      bool ext_dabt_pending = events->exception.ext_dabt_pending;
  
        if (serror_pending && has_esr)
                return -EINVAL;
        else if (serror_pending)
                kvm_inject_vabt(vcpu);
  
 +      if (ext_dabt_pending)
 +              kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 +
        return 0;
  }
  
diff --combined arch/arm64/include/asm/kvm_emulate.h
@@@ -53,8 -53,18 +53,18 @@@ static inline void vcpu_reset_hcr(struc
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }
-       if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+       if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                vcpu->arch.hcr_el2 |= HCR_FWB;
+       } else {
+               /*
+                * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+                * get set in SCTLR_EL1 such that we can detect when the guest
+                * MMU gets turned on and do the necessary cache maintenance
+                * then.
+                */
+               vcpu->arch.hcr_el2 |= HCR_TVM;
+       }
  
        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
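
As an aside, the comment in the hunk above describes trap-driven detection of the guest MMU coming up. A rough illustrative sketch of that idea follows; the function name and the clean_guest_dcache() helper are assumptions for illustration only, not the in-tree trap handler.

/*
 * Illustrative only: with HCR_EL2.TVM set, guest writes to the VM control
 * registers (including SCTLR_EL1) trap to the hypervisor, which can notice
 * the stage-1 MMU and caches being switched on and perform the required
 * cache maintenance before completing the write.
 */
static void sketch_trapped_sctlr_el1_write(struct kvm_vcpu *vcpu, u64 val)
{
	u64 mc = SCTLR_ELx_M | SCTLR_ELx_C;
	bool was_on = (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & mc) == mc;
	bool now_on = (val & mc) == mc;

	vcpu_write_sys_reg(vcpu, val, SCTLR_EL1);

	if (!was_on && now_on)
		clean_guest_dcache(vcpu->kvm);	/* hypothetical maintenance hook */
}
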
@@@ -77,14 -87,19 +87,19 @@@ static inline unsigned long *vcpu_hcr(s
        return (unsigned long *)&vcpu->arch.hcr_el2;
  }
  
- static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+ static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
  {
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
+       if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+               vcpu->arch.hcr_el2 &= ~HCR_TWI;
+       else
+               vcpu->arch.hcr_el2 |= HCR_TWI;
  }
  
- static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
  {
        vcpu->arch.hcr_el2 |= HCR_TWE;
+       vcpu->arch.hcr_el2 |= HCR_TWI;
  }
  
  static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
@@@ -258,11 -273,6 +273,11 @@@ static inline bool kvm_vcpu_dabt_isvali
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
  }
  
 +static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 +{
 +      return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 +}
 +
  static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
  {
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
diff --combined arch/arm64/kvm/guest.c
  #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
  
  struct kvm_stats_debugfs_item debugfs_entries[] = {
+       VCPU_STAT(halt_successful_poll),
+       VCPU_STAT(halt_attempted_poll),
+       VCPU_STAT(halt_poll_invalid),
+       VCPU_STAT(halt_wakeup),
        VCPU_STAT(hvc_exit_stat),
        VCPU_STAT(wfe_exit_stat),
        VCPU_STAT(wfi_exit_stat),
@@@ -712,12 -716,6 +716,12 @@@ int __kvm_arm_vcpu_get_events(struct kv
        if (events->exception.serror_pending && events->exception.serror_has_esr)
                events->exception.serror_esr = vcpu_get_vsesr(vcpu);
  
 +      /*
 +       * We never return a pending ext_dabt here because we deliver it to
 +       * the virtual CPU directly when setting the event and it's no longer
 +       * 'pending' at this point.
 +       */
 +
        return 0;
  }
  
@@@ -726,7 -724,6 +730,7 @@@ int __kvm_arm_vcpu_set_events(struct kv
  {
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;
 +      bool ext_dabt_pending = events->exception.ext_dabt_pending;
  
        if (serror_pending && has_esr) {
                if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                kvm_inject_vabt(vcpu);
        }
  
 +      if (ext_dabt_pending)
 +              kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 +
        return 0;
  }
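
For context, a hedged userspace sketch of the KVM_CAP_ARM_INJECT_EXT_DABT flow this series advertises: when the VMM cannot emulate an access, it can queue an external data abort for the vcpu. 'vcpu_fd' is assumed to be an open vcpu file descriptor; error handling is elided.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Queue a synchronous external data abort for the vcpu.  KVM injects it
 * directly when the event is set, which is why the 'get' path above never
 * reports it as pending.
 */
static int inject_ext_dabt(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.ext_dabt_pending = 1;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
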
  
@@@ -868,9 -862,6 +872,9 @@@ int kvm_arm_vcpu_arch_set_attr(struct k
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_set_attr(vcpu, attr);
                break;
 +      case KVM_ARM_VCPU_PVTIME_CTRL:
 +              ret = kvm_arm_pvtime_set_attr(vcpu, attr);
 +              break;
        default:
                ret = -ENXIO;
                break;
@@@ -891,9 -882,6 +895,9 @@@ int kvm_arm_vcpu_arch_get_attr(struct k
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_get_attr(vcpu, attr);
                break;
 +      case KVM_ARM_VCPU_PVTIME_CTRL:
 +              ret = kvm_arm_pvtime_get_attr(vcpu, attr);
 +              break;
        default:
                ret = -ENXIO;
                break;
@@@ -914,9 -902,6 +918,9 @@@ int kvm_arm_vcpu_arch_has_attr(struct k
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_has_attr(vcpu, attr);
                break;
 +      case KVM_ARM_VCPU_PVTIME_CTRL:
 +              ret = kvm_arm_pvtime_has_attr(vcpu, attr);
 +              break;
        default:
                ret = -ENXIO;
                break;
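
For context, a hedged userspace sketch of driving the new KVM_ARM_VCPU_PVTIME_CTRL attribute group: the VMM tells KVM where in guest IPA space to place this vcpu's stolen-time structure. 'vcpu_fd' and 'ipa' are assumed to be provided by the caller; error handling is elided.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Program the guest IPA (64-byte aligned) of the vcpu's stolen-time region. */
static int set_pvtime_ipa(int vcpu_fd, __u64 ipa)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
		.addr  = (__u64)&ipa,	/* pointer to the IPA value */
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
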
diff --combined virt/kvm/arm/arm.c
  #include <asm/kvm_coproc.h>
  #include <asm/sections.h>
  
 +#include <kvm/arm_hypercalls.h>
 +#include <kvm/arm_pmu.h>
 +#include <kvm/arm_psci.h>
 +
  #ifdef REQUIRES_VIRT
  __asm__(".arch_extension      virt");
  #endif
@@@ -102,26 -98,6 +102,26 @@@ int kvm_arch_check_processor_compat(voi
        return 0;
  }
  
 +int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 +                          struct kvm_enable_cap *cap)
 +{
 +      int r;
 +
 +      if (cap->flags)
 +              return -EINVAL;
 +
 +      switch (cap->cap) {
 +      case KVM_CAP_ARM_NISV_TO_USER:
 +              r = 0;
 +              kvm->arch.return_nisv_io_abort_to_user = true;
 +              break;
 +      default:
 +              r = -EINVAL;
 +              break;
 +      }
 +
 +      return r;
 +}
  
  /**
   * kvm_arch_init_vm - initializes a VM data structure
@@@ -221,8 -197,6 +221,8 @@@ int kvm_vm_ioctl_check_extension(struc
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_VCPU_EVENTS:
        case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 +      case KVM_CAP_ARM_NISV_TO_USER:
 +      case KVM_CAP_ARM_INJECT_EXT_DABT:
                r = 1;
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
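
For context, a hedged userspace sketch of opting in to KVM_CAP_ARM_NISV_TO_USER, which the hunks above add and advertise: data aborts without a valid syndrome are then returned to the VMM as KVM_EXIT_ARM_NISV instead of failing the access. 'vm_fd' is assumed to be an open VM file descriptor; error handling is elided.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * On a KVM_EXIT_ARM_NISV exit, run->arm_nisv.esr_iss carries the sanitized
 * ISS bits and run->arm_nisv.fault_ipa the faulting guest physical address.
 */
static int enable_nisv_to_user(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ARM_NISV_TO_USER;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
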
@@@ -348,20 -322,24 +348,24 @@@ void kvm_arch_vcpu_blocking(struct kvm_
        /*
         * If we're about to block (most likely because we've just hit a
         * WFI), we need to sync back the state of the GIC CPU interface
-        * so that we have the lastest PMR and group enables. This ensures
+        * so that we have the latest PMR and group enables. This ensures
         * that kvm_arch_vcpu_runnable has up-to-date data to decide
         * whether we have pending interrupts.
+        *
+        * For the same reason, we want to tell GICv4 that we need
+        * doorbells to be signalled, should an interrupt become pending.
         */
        preempt_disable();
        kvm_vgic_vmcr_sync(vcpu);
+       vgic_v4_put(vcpu, true);
        preempt_enable();
-       kvm_vgic_v4_enable_doorbell(vcpu);
  }
  
  void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
  {
-       kvm_vgic_v4_disable_doorbell(vcpu);
+       preempt_disable();
+       vgic_v4_load(vcpu);
+       preempt_enable();
  }
  
  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
  
        kvm_arm_reset_debug_ptr(vcpu);
  
 +      kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 +
        return kvm_vgic_vcpu_init(vcpu);
  }
  
@@@ -408,13 -384,11 +412,13 @@@ void kvm_arch_vcpu_load(struct kvm_vcp
        kvm_vcpu_load_sysregs(vcpu);
        kvm_arch_vcpu_load_fp(vcpu);
        kvm_vcpu_pmu_restore_guest(vcpu);
 +      if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 +              kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
  
        if (single_task_running())
-               vcpu_clear_wfe_traps(vcpu);
+               vcpu_clear_wfx_traps(vcpu);
        else
-               vcpu_set_wfe_traps(vcpu);
+               vcpu_set_wfx_traps(vcpu);
  
        vcpu_ptrauth_setup_lazy(vcpu);
  }
@@@ -675,9 -649,6 +679,9 @@@ static void check_vcpu_requests(struct 
                 * that a VCPU sees new virtual interrupts.
                 */
                kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
 +
 +              if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
 +                      kvm_update_stolen_time(vcpu);
        }
  }