Merge branch 'kvm-sev-cgroup' into HEAD
author Paolo Bonzini <pbonzini@redhat.com>
Thu, 22 Apr 2021 06:39:48 +0000 (02:39 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 22 Apr 2021 17:19:01 +0000 (13:19 -0400)
14 files changed:
1  2 
Documentation/virt/kvm/api.rst
MAINTAINERS
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kvm/Makefile
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c

Simple merge
diff --cc MAINTAINERS
Simple merge
@@@ -1036,20 -1048,12 +1059,15 @@@ struct kvm_arch 
        bool guest_can_read_msr_platform_info;
        bool exception_payload_enabled;
  
+       bool bus_lock_detection_enabled;
        /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
        u32 user_space_msr_mask;
-       struct {
-               u8 count;
-               bool default_allow:1;
-               struct msr_bitmap_range ranges[16];
-       } msr_filter;
-       bool bus_lock_detection_enabled;
+       struct kvm_x86_msr_filter __rcu *msr_filter;
  
 +      /* Guest can access the SGX PROVISIONKEY. */
 +      bool sgx_provisioning_allowed;
 +
        struct kvm_pmu_event_filter __rcu *pmu_event_filter;
        struct task_struct *nx_lpage_recovery_thread;
  
Simple merge
Simple merge
@@@ -411,43 -393,15 +411,42 @@@ static int nested_svm_load_cr3(struct k
        return 0;
  }
  
 -static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 +void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
  {
 +      if (!svm->nested.vmcb02.ptr)
 +              return;
 +
 +      /* FIXME: merge g_pat from vmcb01 and vmcb12.  */
 +      svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
 +}
 +
 +static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 +{
 +      bool new_vmcb12 = false;
 +
 +      nested_vmcb02_compute_g_pat(svm);
 +
        /* Load the nested guest state */
 -      svm->vmcb->save.es = vmcb12->save.es;
 -      svm->vmcb->save.cs = vmcb12->save.cs;
 -      svm->vmcb->save.ss = vmcb12->save.ss;
 -      svm->vmcb->save.ds = vmcb12->save.ds;
 -      svm->vmcb->save.gdtr = vmcb12->save.gdtr;
 -      svm->vmcb->save.idtr = vmcb12->save.idtr;
 +      if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
 +              new_vmcb12 = true;
 +              svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
 +      }
 +
 +      if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
 +              svm->vmcb->save.es = vmcb12->save.es;
 +              svm->vmcb->save.cs = vmcb12->save.cs;
 +              svm->vmcb->save.ss = vmcb12->save.ss;
 +              svm->vmcb->save.ds = vmcb12->save.ds;
 +              svm->vmcb->save.cpl = vmcb12->save.cpl;
 +              vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
 +      }
 +
 +      if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
 +              svm->vmcb->save.gdtr = vmcb12->save.gdtr;
 +              svm->vmcb->save.idtr = vmcb12->save.idtr;
 +              vmcb_mark_dirty(svm->vmcb, VMCB_DT);
 +      }
 +
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
  
        /*
@@@ -182,23 -224,23 +221,27 @@@ static int sev_guest_init(struct kvm *k
        if (unlikely(sev->active))
                return ret;
  
-       asid = sev_asid_new(es_active);
++      sev->es_active = es_active;
+       asid = sev_asid_new(sev);
        if (asid < 0)
--              return ret;
++              goto e_no_asid;
+       sev->asid = asid;
  
        ret = sev_platform_init(&argp->error);
        if (ret)
                goto e_free;
  
        sev->active = true;
-       sev->es_active = es_active;
 +      sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);
  
        return 0;
  
  e_free:
-       sev_asid_free(asid);
+       sev_asid_free(sev);
+       sev->asid = 0;
++e_no_asid:
++      sev->es_active = false;
        return ret;
  }
  
@@@ -68,7 -65,7 +68,8 @@@ struct kvm_sev_info 
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
 +      struct kvm *enc_context_owner; /* Owner of copied encryption context */
+       struct misc_cg *misc_cg; /* For misc cgroup accounting */
  };
  
  struct kvm_svm {
Simple merge
Simple merge
Simple merge