Merge branch 'kvm-arm64/misc-5.9' into kvmarm-master/next-WIP
[platform/kernel/linux-rpi.git] arch/arm64/kvm/arm.c
index 5bf9bf5..98f05bd 100644
@@ -106,22 +106,15 @@ static int kvm_arm_default_max_vcpus(void)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-       int ret, cpu;
+       int ret;
 
        ret = kvm_arm_setup_stage2(kvm, type);
        if (ret)
                return ret;
 
-       kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
-       if (!kvm->arch.last_vcpu_ran)
-               return -ENOMEM;
-
-       for_each_possible_cpu(cpu)
-               *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
-       ret = kvm_alloc_stage2_pgd(kvm);
+       ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
        if (ret)
-               goto out_fail_alloc;
+               return ret;
 
        ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
        if (ret)
@@ -129,18 +122,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_vgic_early_init(kvm);
 
-       /* Mark the initial VMID generation invalid */
-       kvm->arch.vmid.vmid_gen = 0;
-
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
 
        return ret;
 out_free_stage2_pgd:
-       kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
-       free_percpu(kvm->arch.last_vcpu_ran);
-       kvm->arch.last_vcpu_ran = NULL;
+       kvm_free_stage2_pgd(&kvm->arch.mmu);
        return ret;
 }
 
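The loose stage-2 state in struct kvm_arch (page-table root, VMID, last_vcpu_ran) is folded into a single MMU object here. A rough sketch of the structure the new code is written against, inferred from the accessors in this diff (the real definition lives in arch/arm64/include/asm/kvm_host.h and carries more detail):

    struct kvm_s2_mmu {
            struct kvm_vmid vmid;           /* was kvm->arch.vmid */

            /* stage-2 entry-level page table for this MMU */
            pgd_t           *pgd;
            phys_addr_t     pgd_phys;

            /* last vCPU id that ran on each physical CPU; was kvm->arch.last_vcpu_ran */
            int __percpu    *last_vcpu_ran;

            struct kvm      *kvm;           /* back-pointer to the owning VM */
    };

Each vCPU then caches a pointer to the MMU it runs under (vcpu->arch.hw_mmu, set in kvm_arch_vcpu_create() below), which is what lets later nested-virt work give a vCPU more than one stage-2 context.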
@@ -160,9 +147,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 
        kvm_vgic_destroy(kvm);
 
-       free_percpu(kvm->arch.last_vcpu_ran);
-       kvm->arch.last_vcpu_ran = NULL;
-
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_vcpu_destroy(kvm->vcpus[i]);
@@ -279,6 +263,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
        kvm_arm_pvtime_vcpu_init(&vcpu->arch);
 
+       vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+
        err = kvm_vgic_vcpu_init(vcpu);
        if (err)
                return err;
@@ -334,16 +320,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+       struct kvm_s2_mmu *mmu;
        int *last_ran;
 
-       last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+       mmu = vcpu->arch.hw_mmu;
+       last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
        /*
         * We might get preempted before the vCPU actually runs, but
         * over-invalidation doesn't affect correctness.
         */
        if (*last_ran != vcpu->vcpu_id) {
-               kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+               kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
                *last_ran = vcpu->vcpu_id;
        }
 
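The percpu bookkeeping deleted from kvm_arch_init_vm() above moves behind kvm_init_stage2_mmu() rather than disappearing. A sketch of the relevant part of that helper, assuming it keeps the old semantics (the real code sits in arch/arm64/kvm/mmu.c):

    int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
    {
            int cpu;

            /* ... allocate mmu->pgd / mmu->pgd_phys first ... */

            mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
            if (!mmu->last_vcpu_ran)
                    return -ENOMEM;

            /* no vCPU has run yet: force a TLB flush on first vcpu_load */
            for_each_possible_cpu(cpu)
                    *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

            mmu->kvm = kvm;
            mmu->vmid.vmid_gen = 0; /* initial VMID generation is invalid */
            return 0;
    }

The flush in kvm_arch_vcpu_load() guards against stale TLB entries tagged with this MMU's VMID when vCPUs of the same VM take turns on one physical CPU; flushing more often than strictly needed only costs time, hence the "over-invalidation doesn't affect correctness" comment.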
@@ -351,7 +339,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        kvm_vgic_load(vcpu);
        kvm_timer_vcpu_load(vcpu);
-       kvm_vcpu_load_sysregs(vcpu);
+       if (has_vhe())
+               kvm_vcpu_load_sysregs_vhe(vcpu);
        kvm_arch_vcpu_load_fp(vcpu);
        kvm_vcpu_pmu_restore_guest(vcpu);
        if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
@@ -369,7 +358,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        kvm_arch_vcpu_put_fp(vcpu);
-       kvm_vcpu_put_sysregs(vcpu);
+       if (has_vhe())
+               kvm_vcpu_put_sysregs_vhe(vcpu);
        kvm_timer_vcpu_put(vcpu);
        kvm_vgic_put(vcpu);
        kvm_vcpu_pmu_restore_host(vcpu);
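The _vhe suffix and the has_vhe() guard encode where the sysreg switch may happen: with VHE the guest's EL1 state can stay loaded on the CPU for the whole vcpu_load()..vcpu_put() window, while without VHE it must be saved and restored inside the hyp run loop, so no nVHE variant of these hooks exists. The guard itself compiles down to a patched branch; roughly (a sketch of the asm/virt.h helper):

    static __always_inline bool has_vhe(void)
    {
            /* resolved via the final cpucaps, effectively constant after boot */
            return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
    }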
@@ -677,7 +667,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                cond_resched();
 
-               update_vmid(&vcpu->kvm->arch.vmid);
+               update_vmid(&vcpu->arch.hw_mmu->vmid);
 
                check_vcpu_requests(vcpu);
 
@@ -726,13 +716,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-               if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
+               if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
                    kvm_request_pending(vcpu)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        isb(); /* Ensure work in x_flush_hwstate is committed */
                        kvm_pmu_sync_hwstate(vcpu);
                        if (static_branch_unlikely(&userspace_irqchip_in_use))
-                               kvm_timer_sync_hwstate(vcpu);
+                               kvm_timer_sync_user(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
                        local_irq_enable();
                        preempt_enable();
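need_new_vmid_gen() is what lets this fast path recheck the (now per-MMU) VMID locklessly with interrupts disabled: if a global VMID rollover happened after update_vmid(), guest entry is aborted and retried. A sketch of the check, matching the helper defined earlier in this file:

    static bool need_new_vmid_gen(struct kvm_vmid *vmid)
    {
            u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);

            smp_rmb(); /* orders the two generation reads */
            return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
    }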
@@ -747,11 +737,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                trace_kvm_entry(*vcpu_pc(vcpu));
                guest_enter_irqoff();
 
-               if (has_vhe()) {
-                       ret = kvm_vcpu_run_vhe(vcpu);
-               } else {
-                       ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
-               }
+               ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;
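The VHE/nVHE branch removed here does not go away; it moves into the kvm_call_hyp_ret() plumbing, which calls the function directly under VHE and otherwise issues an HVC into the nVHE image (where symbols carry a __kvm_nvhe_ prefix). Roughly, as a sketch; the exact macro in asm/kvm_host.h differs in detail:

    #define kvm_call_hyp_ret(f, ...)                                   \
            ({                                                         \
                    typeof(f(__VA_ARGS__)) ret;                        \
                                                                       \
                    if (has_vhe())                                     \
                            ret = f(__VA_ARGS__);                      \
                    else /* trap to EL2, run the nVHE copy of f */     \
                            ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
                                                                       \
                    ret;                                               \
            })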
@@ -781,7 +767,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * timer virtual interrupt state.
                 */
                if (static_branch_unlikely(&userspace_irqchip_in_use))
-                       kvm_timer_sync_hwstate(vcpu);
+                       kvm_timer_sync_user(vcpu);
 
                kvm_arch_vcpu_ctxsync_fp(vcpu);
 
@@ -1284,7 +1270,7 @@ static void cpu_init_hyp_mode(void)
         * so that we can use adr_l to access per-cpu variables in EL2.
         */
        tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-                    (unsigned long)kvm_ksym_ref(kvm_host_data));
+                    (unsigned long)kvm_ksym_ref(&kvm_host_data));
 
        pgd_ptr = kvm_mmu_get_httbr();
        hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
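tpidr_el2 ends up holding the delta between a per-CPU variable's runtime address and its link-time address in the hyp image, so EL2 code can reach its own copy of any per-CPU symbol with PC-relative addressing plus one system-register read. A sketch of the helper hyp code uses for this, along the lines of __hyp_this_cpu_ptr() in asm/kvm_asm.h (details vary between kernel versions):

    #define __hyp_this_cpu_ptr(sym)                                     \
            ({                                                          \
                    /* PC-relative address of sym in the hyp image */   \
                    void *__ptr = hyp_symbol_addr(sym);                 \
                    __ptr += read_sysreg(tpidr_el2);                    \
                    (typeof(&sym))__ptr;                                \
            })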
@@ -1305,7 +1291,7 @@ static void cpu_init_hyp_mode(void)
         */
        if (this_cpu_has_cap(ARM64_SSBS) &&
            arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-               kvm_call_hyp(__kvm_enable_ssbs);
+               kvm_call_hyp_nvhe(__kvm_enable_ssbs);
        }
 }