int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
-bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
+bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
+
void kvm_timer_schedule(struct kvm_vcpu *vcpu);
void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx);
+static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}
-bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
+static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
	u64 cval, now;

	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

	cval = timer_ctx->cnt_cval;
	now = kvm_phys_timer_read() - timer_ctx->cntvoff;
	return cval <= now;
}
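+
+/*
+ * Check whether the guest has a pending timer interrupt: either a timer
+ * output line is already asserted, or a timer's compare deadline has
+ * passed according to the current counter value.
+ */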
+bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+
+ if (vtimer->irq.level || ptimer->irq.level)
+ return true;
+
+ /*
+ * When this is called from within the wait loop of kvm_vcpu_block(),
+ * the software view of the timer state is up to date (timer->loaded
+ * is false), and so we can simply check if the timer should fire now.
+ */
+ if (!vtimer->loaded && kvm_timer_should_fire(vtimer))
+ return true;
+
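+ /* The EL1 physical timer is emulated in software, so its state is never loaded on hardware. */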
+ return kvm_timer_should_fire(ptimer);
+}
+
/*
* Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
- kvm_timer_should_fire(vcpu_ptimer(vcpu));
+ return kvm_timer_is_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)