Merge branch kvm-arm64/timer-vm-offsets into kvmarm-master/next
authorMarc Zyngier <maz@kernel.org>
Fri, 21 Apr 2023 08:31:17 +0000 (09:31 +0100)
committerMarc Zyngier <maz@kernel.org>
Fri, 21 Apr 2023 08:36:40 +0000 (09:36 +0100)
* kvm-arm64/timer-vm-offsets: (21 commits)
  : .
  : This series aims at satisfying multiple goals:
  :
  : - allow a VMM to atomically restore a timer offset for a whole VM
  :   instead of updating the offset each time a vcpu gets its counter
  :   written
  :
  : - allow a VMM to save/restore the physical timer context, something
  :   that we cannot do at the moment due to the lack of offsetting
  :
  : - provide a framework that is suitable for NV support, where we get
  :   both global and per timer, per vcpu offsetting, and manage
  :   interrupts in a less braindead way.
  :
  : Conflict resolution involves using the new per-vcpu config lock instead
  : of the home-grown timer lock.
  : .
  KVM: arm64: Handle 32bit CNTPCTSS traps
  KVM: arm64: selftests: Augment existing timer test to handle variable offset
  KVM: arm64: selftests: Deal with spurious timer interrupts
  KVM: arm64: selftests: Add physical timer registers to the sysreg list
  KVM: arm64: nv: timers: Support hyp timer emulation
  KVM: arm64: nv: timers: Add a per-timer, per-vcpu offset
  KVM: arm64: Document KVM_ARM_SET_CNT_OFFSETS and co
  KVM: arm64: timers: Abstract the number of valid timers per vcpu
  KVM: arm64: timers: Fast-track CNTPCT_EL0 trap handling
  KVM: arm64: Elide kern_hyp_va() in VHE-specific parts of the hypervisor
  KVM: arm64: timers: Move the timer IRQs into arch_timer_vm_data
  KVM: arm64: timers: Abstract per-timer IRQ access
  KVM: arm64: timers: Rationalise per-vcpu timer init
  KVM: arm64: timers: Allow save/restoring of the physical timer
  KVM: arm64: timers: Allow userspace to set the global counter offset
  KVM: arm64: Expose {un,}lock_all_vcpus() to the rest of KVM
  KVM: arm64: timers: Allow physical offset without CNTPOFF_EL2
  KVM: arm64: timers: Use CNTPOFF_EL2 to offset the physical timer
  arm64: Add HAS_ECV_CNTPOFF capability
  arm64: Add CNTPOFF_EL2 register definition
  ...

Signed-off-by: Marc Zyngier <maz@kernel.org>
1  2 
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/vgic/vgic-kvm-device.c
arch/arm64/kvm/vgic/vgic.c
include/kvm/arm_arch_timer.h

Simple merge
@@@ -774,20 -1033,13 +1033,12 @@@ void kvm_timer_vcpu_init(struct kvm_vcp
  
        hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
        timer->bg_timer.function = kvm_bg_timer_expire;
+ }
  
-       hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
-       hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
-       vtimer->hrtimer.function = kvm_hrtimer_expire;
-       ptimer->hrtimer.function = kvm_hrtimer_expire;
-       vtimer->irq.irq = default_vtimer_irq.irq;
-       ptimer->irq.irq = default_ptimer_irq.irq;
-       vtimer->host_timer_irq = host_vtimer_irq;
-       ptimer->host_timer_irq = host_ptimer_irq;
-       vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
-       ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
+ void kvm_timer_init_vm(struct kvm *kvm)
+ {
 -      mutex_init(&kvm->arch.timer_data.lock);
+       for (int i = 0; i < NR_KVM_TIMERS; i++)
+               kvm->arch.timer_data.ppi[i] = default_ppi[i];
  }
  
  void kvm_timer_cpu_up(void)
@@@ -1192,32 -1458,40 +1457,40 @@@ void kvm_timer_vcpu_terminate(struct kv
  
  static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
  {
-       int vtimer_irq, ptimer_irq, ret;
-       unsigned long i;
+       u32 ppis = 0;
+       bool valid;
  
-       vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
-       ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
-       if (ret)
-               return false;
 -      mutex_lock(&vcpu->kvm->arch.timer_data.lock);
++      mutex_lock(&vcpu->kvm->arch.config_lock);
  
-       ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
-       ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
-       if (ret)
-               return false;
+       for (int i = 0; i < nr_timers(vcpu); i++) {
+               struct arch_timer_context *ctx;
+               int irq;
+               ctx = vcpu_get_timer(vcpu, i);
+               irq = timer_irq(ctx);
+               if (kvm_vgic_set_owner(vcpu, irq, ctx))
+                       break;
  
-       kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
-               if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
-                   vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
-                       return false;
+               /*
+                * We know by construction that we only have PPIs, so
+                * all values are less than 32.
+                */
+               ppis |= BIT(irq);
        }
  
-       return true;
+       valid = hweight32(ppis) == nr_timers(vcpu);
+       if (valid)
+               set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);
 -      mutex_unlock(&vcpu->kvm->arch.timer_data.lock);
++      mutex_unlock(&vcpu->kvm->arch.config_lock);
+       return valid;
  }
  
- bool kvm_arch_timer_get_input_level(int vintid)
static bool kvm_arch_timer_get_input_level(int vintid)
  {
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-       struct arch_timer_context *timer;
  
        if (WARN(!vcpu, "No vcpu context!\n"))
                return false;
@@@ -1327,21 -1577,42 +1576,42 @@@ int kvm_arm_timer_set_attr(struct kvm_v
        if (!(irq_is_ppi(irq)))
                return -EINVAL;
  
-       if (vcpu->arch.timer_cpu.enabled)
-               return -EBUSY;
 -      mutex_lock(&vcpu->kvm->arch.timer_data.lock);
++      mutex_lock(&vcpu->kvm->arch.config_lock);
+       if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
+                    &vcpu->kvm->arch.flags)) {
+               ret = -EBUSY;
+               goto out;
+       }
  
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
-               set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
+               idx = TIMER_VTIMER;
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
-               set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
+               idx = TIMER_PTIMER;
+               break;
+       case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+               idx = TIMER_HVTIMER;
+               break;
+       case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
+               idx = TIMER_HPTIMER;
                break;
        default:
-               return -ENXIO;
+               ret = -ENXIO;
+               goto out;
        }
  
-       return 0;
+       /*
+        * We cannot validate the IRQ unicity before we run, so take it at
+        * face value. The verdict will be given on first vcpu run, for each
+        * vcpu. Yes this is late. Blame it on the stupid API.
+        */
+       vcpu->kvm->arch.timer_data.ppi[idx] = irq;
+ out:
 -      mutex_unlock(&vcpu->kvm->arch.timer_data.lock);
++      mutex_unlock(&vcpu->kvm->arch.config_lock);
+       return ret;
  }
  
  int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -34,6 -43,13 +43,11 @@@ struct arch_timer_offset 
  struct arch_timer_vm_data {
        /* Offset applied to the virtual timer/counter */
        u64     voffset;
 -      struct mutex    lock;
 -
+       /* Offset applied to the physical timer/counter */
+       u64     poffset;
+       /* The PPI for each timer, global to the VM */
+       u8      ppi[NR_KVM_TIMERS];
  };
  
  struct arch_timer_context {