--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
#include <linux/hrtimer.h>
#include <linux/workqueue.h>

-struct arch_timer_kvm {
-	/* Virtual offset */
-	u64 cntvoff;
-};
-
struct arch_timer_context {
	/* Registers: control register, timer value */
	u32 cnt_ctl;
	u64 cnt_cval;

	/* Active IRQ state caching */
	bool active_cleared_last;
+
+	/* Virtual offset */
+	u64 cntvoff;
};

struct arch_timer_cpu {
	struct arch_timer_context vtimer;
	...
};

int kvm_timer_hyp_init(void);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
-void kvm_timer_init(struct kvm *kvm);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
			 const struct kvm_irq_level *irq);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);

--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
{
	u64 cval, now;
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

-	cval = vcpu_vtimer(vcpu)->cnt_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+	cval = vtimer->cnt_cval;
+	now = kvm_phys_timer_read() - vtimer->cntvoff;

	if (now < cval) {
		u64 ns;
@@ ... @@ static bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
	if (!kvm_timer_irq_can_fire(vcpu))
		return false;

	cval = vtimer->cnt_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+	now = kvm_phys_timer_read() - vtimer->cntvoff;

	return cval <= now;
}
@@ ... @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
	return 0;
}

+/* Make the updates of cntvoff for all vtimer contexts atomic */
+static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
+{
+ int i;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *tmp;
+
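+	/* kvm->lock serializes concurrent offset updates from different VCPUs */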
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, tmp, kvm)
+ vcpu_vtimer(tmp)->cntvoff = cntvoff;
+
+ /*
+ * When called from the vcpu create path, the CPU being created is not
+ * included in the loop above, so we just set it here as well.
+ */
+ vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+ mutex_unlock(&kvm->lock);
+}
+
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

+	/* Synchronize cntvoff across all vtimers of a VM. */
+ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+
INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = kvm_timer_expire;

@@ ... @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
	case KVM_REG_ARM_TIMER_CTL:
		vtimer->cnt_ctl = value;
break;
case KVM_REG_ARM_TIMER_CNT:
- vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
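+		/* Writing CNT sets a VM-wide offset: cntvoff = phys_count - value */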
+ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
break;
case KVM_REG_ARM_TIMER_CVAL:
		vtimer->cnt_cval = value;
		break;

@@ ... @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
	case KVM_REG_ARM_TIMER_CTL:
return vtimer->cnt_ctl;
case KVM_REG_ARM_TIMER_CNT:
- return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
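+		/* The guest sees the physical counter minus the per-vcpu offset */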
+ return kvm_phys_timer_read() - vtimer->cntvoff;
case KVM_REG_ARM_TIMER_CVAL:
return vtimer->cnt_cval;
	}

@@ ... @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
	return 0;
}

-void kvm_timer_init(struct kvm *kvm)
-{
- kvm->arch.timer.cntvoff = kvm_phys_timer_read();
-}
-
/*
* On VHE system, we only need to configure trap on physical timer and counter
 * accesses in EL0 and EL1 once, not for every world switch.
 */

--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	u64 val;

	/* Those bits are already configured at boot on VHE-system */
	if (!has_vhe()) {
		...
	}

	if (timer->enabled) {
- write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
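+		/* CNTVOFF_EL2 is now loaded from the per-vcpu timer context */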
+ write_sysreg(vtimer->cntvoff, cntvoff_el2);
write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
isb();
write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);