KVM: Rename kvm_vcpu_block() => kvm_vcpu_halt()
author    Sean Christopherson <seanjc@google.com>
Sat, 9 Oct 2021 02:12:06 +0000 (19:12 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Dec 2021 09:24:51 +0000 (04:24 -0500)
Rename kvm_vcpu_block() to kvm_vcpu_halt() in preparation for splitting
the actual "block" sequences into a separate helper (to be named
kvm_vcpu_block()).  x86 will use the standalone block-only path to handle
non-halt cases where the vCPU is not runnable.

Rename block_ns to halt_ns to match the new function name.

No functional change intended.
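
As a rough sketch of where the series is headed (illustrative only; the
actual extraction happens in a later patch, and exact signatures are per
that patch, not this one):

	void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
	{
		/* ... halt-poll for up to vcpu->halt_poll_ns ... */

		/* Block (schedule out) until a wake event arrives. */
		kvm_vcpu_block(vcpu);

		/* ... compute halt_ns, adjust the poll window, trace ... */
	}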

Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
14 files changed:
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/psci.c
arch/mips/kvm/emulate.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/powerpc.c
arch/riscv/kvm/vcpu_exit.c
arch/s390/kvm/interrupt.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index d6f4114..3aeaa79 100644
@@ -467,7 +467,7 @@ out:
 }
 
 /*
- * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * Schedule the background timer before calling kvm_vcpu_halt, so that this
  * thread is removed from its waitqueue and made runnable when there's a timer
  * interrupt to handle.
  */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ced54a3..77ecc11 100644
@@ -681,7 +681,7 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
        vgic_v4_put(vcpu, true);
        preempt_enable();
 
-       kvm_vcpu_block(vcpu);
+       kvm_vcpu_halt(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
        preempt_disable();
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 4794563..6d0baf7 100644
@@ -82,7 +82,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
  *
  * WFE: Yield the CPU and come back to this vcpu when the scheduler
  * decides to.
- * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index ed675fc..ad6c9ef 100644
@@ -46,7 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
         * specification (ARM DEN 0022A). This means all suspend states
         * for KVM will preserve the register state.
         */
-       kvm_vcpu_block(vcpu);
+       kvm_vcpu_halt(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 
        return PSCI_RET_SUCCESS;
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 22e745e..b494d8d 100644
@@ -952,7 +952,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
        if (!vcpu->arch.pending_exceptions) {
                kvm_vz_lose_htimer(vcpu);
                vcpu->arch.wait = 1;
-               kvm_vcpu_block(vcpu);
+               kvm_vcpu_halt(vcpu);
 
                /*
                 * If we are runnable, then definitely go off to user space to
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 30426e8..34a801c 100644
@@ -492,7 +492,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 
        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
-                       kvm_vcpu_block(vcpu);
+                       kvm_vcpu_halt(vcpu);
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                        vcpu->stat.generic.halt_wakeup++;
 
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index ac14239..1f10e7d 100644
@@ -376,7 +376,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
                return kvmppc_h_pr_stuff_tce(vcpu);
        case H_CEDE:
                kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
-               kvm_vcpu_block(vcpu);
+               kvm_vcpu_halt(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                vcpu->stat.generic.halt_wakeup++;
                return EMULATE_DONE;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53b4c95..06c5830 100644
@@ -718,7 +718,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
-               kvm_vcpu_block(vcpu);
+               kvm_vcpu_halt(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                hard_irq_disable();
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7de9ddb..2ad0ccd 100644
@@ -236,7 +236,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
-               kvm_vcpu_block(vcpu);
+               kvm_vcpu_halt(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                break;
        default:
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 7f2d742..571f319 100644
@@ -146,7 +146,7 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu,
                vcpu->stat.wfi_exit_stat++;
                if (!kvm_arch_vcpu_runnable(vcpu)) {
                        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
-                       kvm_vcpu_block(vcpu);
+                       kvm_vcpu_halt(vcpu);
                        vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 3c8246f..dbabd6f 100644
@@ -1335,7 +1335,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-       kvm_vcpu_block(vcpu);
+       kvm_vcpu_halt(vcpu);
        vcpu->valid_wakeup = false;
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43cabc7..e3dd76f 100644
@@ -8727,6 +8727,13 @@ void kvm_arch_exit(void)
 
 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
+       /*
+        * The vCPU has halted, e.g. executed HLT.  Update the run state if the
+        * local APIC is in-kernel; the run loop will detect the non-runnable
+        * state and halt the vCPU.  Exit to userspace if the local APIC is
+        * managed by userspace, in which case userspace is responsible for
+        * handling wake events.
+        */
        ++vcpu->stat.halt_exits;
        if (lapic_in_kernel(vcpu)) {
                vcpu->arch.mp_state = state;
@@ -9999,7 +10006,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        if (!kvm_arch_vcpu_runnable(vcpu) &&
            (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
                srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-               kvm_vcpu_block(vcpu);
+               kvm_vcpu_halt(vcpu);
                vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
                if (kvm_x86_ops.post_block)
@@ -10196,7 +10203,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        r = -EINTR;
                        goto out;
                }
-               kvm_vcpu_block(vcpu);
+               kvm_vcpu_halt(vcpu);
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;
                        goto out;
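
For reference, the first x86 hunk above truncates the helper; the complete
body that the new comment documents reads roughly as follows at this point
in the series:

	static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
	{
		++vcpu->stat.halt_exits;
		if (lapic_in_kernel(vcpu)) {
			/* In-kernel APIC: the run loop will halt the vCPU. */
			vcpu->arch.mp_state = state;
			return 1;
		} else {
			/* Userspace APIC: exit so userspace can handle wake events. */
			vcpu->run->exit_reason = reason;
			return 0;
		}
	}
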
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index afacbfb..ea3c22d 100644
@@ -1102,7 +1102,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 53c5860..0d301c9 100644
@@ -3294,17 +3294,14 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
        }
 }
 
-/*
- * The vCPU has executed a HLT instruction with in-kernel mode enabled.
- */
-void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
        bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
        bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
        ktime_t start, cur, poll_end;
        bool waited = false;
-       u64 block_ns;
+       u64 halt_ns;
 
        start = cur = poll_end = ktime_get();
        if (do_halt_poll) {
@@ -3346,7 +3343,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                ktime_to_ns(cur) - ktime_to_ns(poll_end));
        }
 out:
-       block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+       /* The total time the vCPU was "halted", including polling time. */
+       halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
        /*
         * Note, halt-polling is considered successful so long as the vCPU was
@@ -3360,24 +3358,24 @@ out:
                if (!vcpu_valid_wakeup(vcpu)) {
                        shrink_halt_poll_ns(vcpu);
                } else if (vcpu->kvm->max_halt_poll_ns) {
-                       if (block_ns <= vcpu->halt_poll_ns)
+                       if (halt_ns <= vcpu->halt_poll_ns)
                                ;
                        /* we had a long block, shrink polling */
                        else if (vcpu->halt_poll_ns &&
-                                       block_ns > vcpu->kvm->max_halt_poll_ns)
+                                halt_ns > vcpu->kvm->max_halt_poll_ns)
                                shrink_halt_poll_ns(vcpu);
                        /* we had a short halt and our poll time is too small */
                        else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
-                                       block_ns < vcpu->kvm->max_halt_poll_ns)
+                                halt_ns < vcpu->kvm->max_halt_poll_ns)
                                grow_halt_poll_ns(vcpu);
                } else {
                        vcpu->halt_poll_ns = 0;
                }
        }
 
-       trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
+       trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_block);
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 {
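
Restated without the diff markers, the poll-window policy that now consumes
halt_ns is unchanged by this patch and reads roughly as follows (a
paraphrase for readability, not new logic):

	if (halt_poll_allowed) {
		if (!vcpu_valid_wakeup(vcpu)) {
			/* Spurious wakeup: back the poll window off. */
			shrink_halt_poll_ns(vcpu);
		} else if (vcpu->kvm->max_halt_poll_ns) {
			if (halt_ns <= vcpu->halt_poll_ns)
				; /* Poll window covered the halt; keep it. */
			else if (vcpu->halt_poll_ns &&
				 halt_ns > vcpu->kvm->max_halt_poll_ns)
				shrink_halt_poll_ns(vcpu); /* long block */
			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
				 halt_ns < vcpu->kvm->max_halt_poll_ns)
				grow_halt_poll_ns(vcpu); /* short halt, poll longer */
		} else {
			vcpu->halt_poll_ns = 0; /* polling disabled for the VM */
		}
	}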