KVM: X86: Count attempted/successful directed yield
authorWanpeng Li <wanpengli@tencent.com>
Fri, 9 Apr 2021 04:18:30 +0000 (12:18 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Mon, 19 Apr 2021 22:04:46 +0000 (18:04 -0400)
To analyze some performance issues with lock contention and scheduling,
it is useful to know when directed yields succeed or fail.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1617941911-5338-2-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

index 2f6f4ae..770063d 100644 (file)
@@ -1146,6 +1146,8 @@ struct kvm_vcpu_stat {
        u64 halt_poll_success_ns;
        u64 halt_poll_fail_ns;
        u64 nested_run;
+       u64 directed_yield_attempted;
+       u64 directed_yield_successful;
 };
 
 struct x86_instruction_info;
index 91c98e7..05a4bce 100644 (file)
@@ -246,6 +246,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
        VCPU_STAT("nested_run", nested_run),
+       VCPU_STAT("directed_yield_attempted", directed_yield_attempted),
+       VCPU_STAT("directed_yield_successful", directed_yield_successful),
        VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
        VM_STAT("mmu_pte_write", mmu_pte_write),
        VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
@@ -8208,21 +8210,31 @@ void kvm_apicv_init(struct kvm *kvm, bool enable)
 }
 EXPORT_SYMBOL_GPL(kvm_apicv_init);
 
-static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
+static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
 {
        struct kvm_vcpu *target = NULL;
        struct kvm_apic_map *map;
 
+       vcpu->stat.directed_yield_attempted++;
+
        rcu_read_lock();
-       map = rcu_dereference(kvm->arch.apic_map);
+       map = rcu_dereference(vcpu->kvm->arch.apic_map);
 
        if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
                target = map->phys_map[dest_id]->vcpu;
 
        rcu_read_unlock();
 
-       if (target && READ_ONCE(target->ready))
-               kvm_vcpu_yield_to(target);
+       if (!target || !READ_ONCE(target->ready))
+               goto no_yield;
+
+       if (kvm_vcpu_yield_to(target) <= 0)
+               goto no_yield;
+
+       vcpu->stat.directed_yield_successful++;
+
+no_yield:
+       return;
 }
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
@@ -8269,7 +8281,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                        break;
 
                kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
-               kvm_sched_yield(vcpu->kvm, a1);
+               kvm_sched_yield(vcpu, a1);
                ret = 0;
                break;
 #ifdef CONFIG_X86_64
@@ -8287,7 +8299,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
                        break;
 
-               kvm_sched_yield(vcpu->kvm, a0);
+               kvm_sched_yield(vcpu, a0);
                ret = 0;
                break;
        default: