From 41628d334361670d825fb03c04568f5ef9f084dc Mon Sep 17 00:00:00 2001
From: Konstantin Weitz
Date: Wed, 25 Apr 2012 15:30:38 +0200
Subject: [PATCH] KVM: s390: Implement the directed yield (diag 9c) hypervisor
 call for KVM

This patch implements the directed yield hypercall found on other
System z hypervisors. It delegates execution time to the virtual cpu
specified in the instruction's parameter.

Useful to avoid long spinlock waits in the guest.

Christian Borntraeger: moved common code in virt/kvm/

Signed-off-by: Konstantin Weitz
Signed-off-by: Christian Borntraeger
Signed-off-by: Marcelo Tosatti
---
 arch/s390/include/asm/kvm_host.h |  1 +
 arch/s390/kvm/diag.c             | 25 ++++++++++++++++++++++++
 arch/s390/kvm/kvm-s390.c         |  1 +
 include/linux/kvm_host.h         |  1 +
 virt/kvm/kvm_main.c              | 42 +++++++++++++++++++++++---------------
 5 files changed, 54 insertions(+), 16 deletions(-)

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 7343872..dd17537 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_sigp_restart;
 	u32 diagnose_10;
 	u32 diagnose_44;
+	u32 diagnose_9c;
 };
 
 struct kvm_s390_io_info {
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a353f0e..2d2ae32 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -53,6 +53,29 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_vcpu *tcpu;
+	int tid;
+	int i;
+
+	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+	vcpu->stat.diagnose_9c++;
+	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+
+	if (tid == vcpu->vcpu_id)
+		return 0;
+
+	kvm_for_each_vcpu(i, tcpu, kvm)
+		if (tcpu->vcpu_id == tid) {
+			kvm_vcpu_yield_to(tcpu);
+			break;
+		}
+
+	return 0;
+}
+
 static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 {
 	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
@@ -89,6 +112,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 		return diag_release_pages(vcpu);
 	case 0x44:
 		return __diag_time_slice_end(vcpu);
+	case 0x9c:
+		return __diag_time_slice_end_directed(vcpu);
 	case 0x308:
 		return __diag_ipl_functions(vcpu);
 	default:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d30c835..fd98914 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -74,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
+	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
 	{ NULL }
 };
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6f34330..cae342d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -461,6 +461,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1847c76..7e14068 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1543,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+	struct pid *pid;
+	struct task_struct *task = NULL;
+
+	rcu_read_lock();
+	pid = rcu_dereference(target->pid);
+	if (pid)
+		task = get_pid_task(target->pid, PIDTYPE_PID);
+	rcu_read_unlock();
+	if (!task)
+		return false;
+	if (task->flags & PF_VCPU) {
+		put_task_struct(task);
+		return false;
+	}
+	if (yield_to(task, 1)) {
+		put_task_struct(task);
+		return true;
+	}
+	put_task_struct(task);
+	return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
@@ -1561,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 	 */
 	for (pass = 0; pass < 2 && !yielded; pass++) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			struct task_struct *task = NULL;
-			struct pid *pid;
 			if (!pass && i < last_boosted_vcpu) {
 				i = last_boosted_vcpu;
 				continue;
@@ -1572,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (waitqueue_active(&vcpu->wq))
 				continue;
-			rcu_read_lock();
-			pid = rcu_dereference(vcpu->pid);
-			if (pid)
-				task = get_pid_task(vcpu->pid, PIDTYPE_PID);
-			rcu_read_unlock();
-			if (!task)
-				continue;
-			if (task->flags & PF_VCPU) {
-				put_task_struct(task);
-				continue;
-			}
-			if (yield_to(task, 1)) {
-				put_task_struct(task);
+			if (kvm_vcpu_yield_to(vcpu)) {
 				kvm->last_boosted_vcpu = i;
 				yielded = 1;
 				break;
 			}
-			put_task_struct(task);
 		}
 	}
 }
-- 
2.7.4
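For context only (not part of the patch): the guest side of this interface would
issue diag 0x9c from its spinlock slow path once it knows which virtual CPU holds
the contended lock. A minimal sketch of such a caller is below; the function name
guest_yield_to_cpu() is illustrative, and it assumes the caller already has the
target CPU address that the host handler above extracts via the SIE ipa field.

	/*
	 * Sketch of a guest-side caller (assumption, not introduced by this
	 * patch): pass the lock holder's CPU address in the first diag
	 * register so the host can boost that vcpu via kvm_vcpu_yield_to().
	 */
	static inline void guest_yield_to_cpu(unsigned int cpu_addr)
	{
		asm volatile("diag %0,0,0x9c" : : "d" (cpu_addr));
	}

A guest would normally gate such a call on a facility check so it can fall back
to the plain diag 0x44 time-slice yield on hosts that do not handle 0x9c.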