Merge tag 'kvmarm-fixes-5.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmar...

diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 560d6f2..eda7b62 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -49,9 +49,6 @@ __asm__(".arch_extension      virt");
 DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
-/* Per-CPU variable containing the currently running vcpu. */
-static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
-
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
@@ -60,31 +57,8 @@ static DEFINE_SPINLOCK(kvm_vmid_lock);
 static bool vgic_present;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
-
-static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
-{
-       __this_cpu_write(kvm_arm_running_vcpu, vcpu);
-}
-
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-/**
- * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
- * Must be called from non-preemptible context
- */
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
-{
-       return __this_cpu_read(kvm_arm_running_vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
- */
-struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
-{
-       return &kvm_arm_running_vcpu;
-}
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -192,7 +166,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
-                       kvm_arch_vcpu_free(kvm->vcpus[i]);
+                       kvm_vcpu_destroy(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
@@ -277,49 +251,46 @@ void kvm_arch_free_vm(struct kvm *kvm)
                vfree(kvm);
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+       if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+               return -EBUSY;
+
+       if (id >= kvm->arch.max_vcpus)
+               return -EINVAL;
+
+       return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
        int err;
-       struct kvm_vcpu *vcpu;
 
-       if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
-               err = -EBUSY;
-               goto out;
-       }
+       /* Force users to call KVM_ARM_VCPU_INIT */
+       vcpu->arch.target = -1;
+       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
-       if (id >= kvm->arch.max_vcpus) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* Set up the timer */
+       kvm_timer_vcpu_init(vcpu);
 
-       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-       if (!vcpu) {
-               err = -ENOMEM;
-               goto out;
-       }
+       kvm_pmu_vcpu_init(vcpu);
 
-       err = kvm_vcpu_init(vcpu, kvm, id);
-       if (err)
-               goto free_vcpu;
+       kvm_arm_reset_debug_ptr(vcpu);
+
+       kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 
-       err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
+       err = kvm_vgic_vcpu_init(vcpu);
        if (err)
-               goto vcpu_uninit;
+               return err;
 
-       return vcpu;
-vcpu_uninit:
-       kvm_vcpu_uninit(vcpu);
-free_vcpu:
-       kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-       return ERR_PTR(err);
+       return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 }
 
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
                static_branch_dec(&userspace_irqchip_in_use);
@@ -327,13 +298,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
-       kvm_vcpu_uninit(vcpu);
-       kmem_cache_free(kvm_vcpu_cache, vcpu);
-}
 
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-       kvm_arch_vcpu_free(vcpu);
+       kvm_arm_vcpu_destroy(vcpu);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -366,24 +332,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
        preempt_enable();
 }
 
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       /* Force users to call KVM_ARM_VCPU_INIT */
-       vcpu->arch.target = -1;
-       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
-       /* Set up the timer */
-       kvm_timer_vcpu_init(vcpu);
-
-       kvm_pmu_vcpu_init(vcpu);
-
-       kvm_arm_reset_debug_ptr(vcpu);
-
-       kvm_arm_pvtime_vcpu_init(&vcpu->arch);
-
-       return kvm_vgic_vcpu_init(vcpu);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        int *last_ran;
@@ -404,7 +352,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
-       kvm_arm_set_running_vcpu(vcpu);
        kvm_vgic_load(vcpu);
        kvm_timer_vcpu_load(vcpu);
        kvm_vcpu_load_sysregs(vcpu);
@@ -430,8 +377,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        kvm_vcpu_pmu_restore_host(vcpu);
 
        vcpu->cpu = -1;
-
-       kvm_arm_set_running_vcpu(NULL);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)