KVM: PPC: Book3S HV: small pseries_do_hcall cleanup
Author: Nicholas Piggin <npiggin@gmail.com>
Fri, 28 May 2021 09:07:47 +0000 (19:07 +1000)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Thu, 10 Jun 2021 12:12:15 +0000 (22:12 +1000)
Functionality should not be changed.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-28-npiggin@gmail.com
arch/powerpc/kvm/book3s_hv.c

index cf40328..9ba7774 100644 (file)
@@ -927,6 +927,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
+       struct kvm *kvm = vcpu->kvm;
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
        unsigned long target, ret = H_SUCCESS;
        int yield_count;
@@ -942,7 +943,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                break;
        case H_PROD:
                target = kvmppc_get_gpr(vcpu, 4);
-               tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+               tvcpu = kvmppc_find_vcpu(kvm, target);
                if (!tvcpu) {
                        ret = H_PARAMETER;
                        break;
@@ -956,7 +957,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                target = kvmppc_get_gpr(vcpu, 4);
                if (target == -1)
                        break;
-               tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+               tvcpu = kvmppc_find_vcpu(kvm, target);
                if (!tvcpu) {
                        ret = H_PARAMETER;
                        break;
@@ -972,12 +973,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                                        kvmppc_get_gpr(vcpu, 6));
                break;
        case H_RTAS:
-               if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+               if (list_empty(&kvm->arch.rtas_tokens))
                        return RESUME_HOST;
 
-               idx = srcu_read_lock(&vcpu->kvm->srcu);
+               idx = srcu_read_lock(&kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
-               srcu_read_unlock(&vcpu->kvm->srcu, idx);
+               srcu_read_unlock(&kvm->srcu, idx);
 
                if (rc == -ENOENT)
                        return RESUME_HOST;
@@ -1064,12 +1065,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 
        case H_SET_PARTITION_TABLE:
                ret = H_FUNCTION;
-               if (nesting_enabled(vcpu->kvm))
+               if (nesting_enabled(kvm))
                        ret = kvmhv_set_partition_table(vcpu);
                break;
        case H_ENTER_NESTED:
                ret = H_FUNCTION;
-               if (!nesting_enabled(vcpu->kvm))
+               if (!nesting_enabled(kvm))
                        break;
                ret = kvmhv_enter_nested_guest(vcpu);
                if (ret == H_INTERRUPT) {
@@ -1084,12 +1085,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                break;
        case H_TLB_INVALIDATE:
                ret = H_FUNCTION;
-               if (nesting_enabled(vcpu->kvm))
+               if (nesting_enabled(kvm))
                        ret = kvmhv_do_nested_tlbie(vcpu);
                break;
        case H_COPY_TOFROM_GUEST:
                ret = H_FUNCTION;
-               if (nesting_enabled(vcpu->kvm))
+               if (nesting_enabled(kvm))
                        ret = kvmhv_copy_tofrom_guest_nested(vcpu);
                break;
        case H_PAGE_INIT:
@@ -1100,7 +1101,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_SVM_PAGE_IN:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_page_in(vcpu->kvm,
+                       ret = kvmppc_h_svm_page_in(kvm,
                                                   kvmppc_get_gpr(vcpu, 4),
                                                   kvmppc_get_gpr(vcpu, 5),
                                                   kvmppc_get_gpr(vcpu, 6));
@@ -1108,7 +1109,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_SVM_PAGE_OUT:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_page_out(vcpu->kvm,
+                       ret = kvmppc_h_svm_page_out(kvm,
                                                    kvmppc_get_gpr(vcpu, 4),
                                                    kvmppc_get_gpr(vcpu, 5),
                                                    kvmppc_get_gpr(vcpu, 6));
@@ -1116,12 +1117,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_SVM_INIT_START:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_init_start(vcpu->kvm);
+                       ret = kvmppc_h_svm_init_start(kvm);
                break;
        case H_SVM_INIT_DONE:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_init_done(vcpu->kvm);
+                       ret = kvmppc_h_svm_init_done(kvm);
                break;
        case H_SVM_INIT_ABORT:
                /*
@@ -1131,7 +1132,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                 * Instead the kvm->arch.secure_guest flag is checked inside
                 * kvmppc_h_svm_init_abort().
                 */
-               ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+               ret = kvmppc_h_svm_init_abort(kvm);
                break;
 
        default: