KVM: Wrap kvm_{gfn,hva}_range.pte in a per-action union
author	Sean Christopherson <seanjc@google.com>
	Sat, 29 Jul 2023 00:41:44 +0000 (17:41 -0700)
committer	Sean Christopherson <seanjc@google.com>
	Thu, 17 Aug 2023 18:26:53 +0000 (11:26 -0700)
Wrap kvm_{gfn,hva}_range.pte in a union so that future notifier events can
pass event-specific information up and down the stack without needing to
constantly expand and churn the APIs.  Lockless aging of SPTEs will pass
around a bitmap, and support for memory attributes will pass around the
new attributes for the range.
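
For illustration, the union could later grow along the lines of the sketch
below.  This is a sketch only; the "bitmap" and "attributes" member names
are assumptions invented here to mirror the two planned users, not part of
this commit:

	/*
	 * Illustrative sketch: how the union might grow for the two
	 * planned users named above (member names assumed).
	 */
	union kvm_mmu_notifier_arg {
		pte_t pte;
		unsigned long *bitmap;		/* lockless aging of SPTEs */
		unsigned long attributes;	/* memory attributes for the range */
	};

Each event sets and reads only its own member, so adding a member does not
perturb existing events or their handlers.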

Add a "KVM_NO_ARG" placeholder to simplify handling events without an
argument (creating a dummy union variable is midly annoying).
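
The two resulting call styles, taken from the virt/kvm/kvm_main.c hunks
below:

	/* Event with an argument: wrap the value in the union at the call site. */
	const union kvm_mmu_notifier_arg arg = { .pte = pte };
	kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);

	/* Event without an argument: pass the zero-valued placeholder. */
	kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG, kvm_age_gfn);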

Opportunistically drop the explicit zero-initialization of the "pte" field,
as omitting the field (now a union) has the same effect.
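
This relies on standard C initialization semantics: members omitted from a
designated initializer are zero-initialized.  A minimal standalone
illustration (the "struct example" type is invented for this sketch, not
code from this commit):

	/*
	 * Members omitted from a designated initializer are zeroed, so
	 * dropping ".pte = __pte(0)" changes nothing.
	 */
	struct example {
		unsigned long start;
		union kvm_mmu_notifier_arg arg;
	};
	struct example e = { .start = 1 };	/* e.arg is zeroed implicitly */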

Cc: Yu Zhao <yuzhao@google.com>
Link: https://lore.kernel.org/all/CAOUHufagkd2Jk3_HrVoFFptRXM=hX2CV8f+M-dka-hJU4bP8kw@mail.gmail.com
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Acked-by: Yu Zhao <yuzhao@google.com>
Link: https://lore.kernel.org/r/20230729004144.1054885-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/arm64/kvm/mmu.c
arch/mips/kvm/mmu.c
arch/riscv/kvm/mmu.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

arch/arm64/kvm/mmu.c
index 6db9ef288ec388ed62e4275a3b6b63d17f6d022d..55f03a68f1cdbe6fbb174b7a70826513d2f5066d 100644
@@ -1721,7 +1721,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-       kvm_pfn_t pfn = pte_pfn(range->pte);
+       kvm_pfn_t pfn = pte_pfn(range->arg.pte);
 
        if (!kvm->arch.mmu.pgt)
                return false;
arch/mips/kvm/mmu.c
index e8c08988ed377dabebc0dbf89d343d77aced3a1d..7b2ac1319d70acafd68cfe6b30680abc7bd04234 100644
@@ -447,7 +447,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        gpa_t gpa = range->start << PAGE_SHIFT;
-       pte_t hva_pte = range->pte;
+       pte_t hva_pte = range->arg.pte;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        pte_t old_pte;
 
arch/riscv/kvm/mmu.c
index f2eb47925806b6830acc9d32ebd456da81063608..857f4312b0f886d34c579a9c731b6eca593505a6 100644
@@ -559,7 +559,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
        int ret;
-       kvm_pfn_t pfn = pte_pfn(range->pte);
+       kvm_pfn_t pfn = pte_pfn(range->arg.pte);
 
        if (!kvm->arch.pgd)
                return false;
arch/x86/kvm/mmu/mmu.c
index ec169f5c7dce21d5f730638ba86ebc99f3050146..d72f2b20f4301bd6519b31c6e11e4be3abbc2ba4 100644
@@ -1588,7 +1588,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
        for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
                                 range->start, range->end - 1, &iterator)
                ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
-                              iterator.level, range->pte);
+                              iterator.level, range->arg.pte);
 
        return ret;
 }
arch/x86/kvm/mmu/tdp_mmu.c
index 512163d52194b9945ec19e03b90ae590810107ab..6250bd3d20c1b81b400b8507c8c2586b29169c8e 100644
@@ -1241,7 +1241,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
        u64 new_spte;
 
        /* Huge pages aren't expected to be modified without first being zapped. */
-       WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
+       WARN_ON(pte_huge(range->arg.pte) || range->start + 1 != range->end);
 
        if (iter->level != PG_LEVEL_4K ||
            !is_shadow_present_pte(iter->old_spte))
@@ -1255,9 +1255,9 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
         */
        tdp_mmu_iter_set_spte(kvm, iter, 0);
 
-       if (!pte_write(range->pte)) {
+       if (!pte_write(range->arg.pte)) {
                new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
-                                                                 pte_pfn(range->pte));
+                                                                 pte_pfn(range->arg.pte));
 
                tdp_mmu_iter_set_spte(kvm, iter, new_spte);
        }
include/linux/kvm_host.h
index 9d3ac7720da9f46a2de764bbd0508da040355f59..9125d0ab642d2fc4b8431e1fb9c76a521ff247a6 100644
@@ -256,11 +256,15 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+       pte_t pte;
+};
+
 struct kvm_gfn_range {
        struct kvm_memory_slot *slot;
        gfn_t start;
        gfn_t end;
-       pte_t pte;
+       union kvm_mmu_notifier_arg arg;
        bool may_block;
 };
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
virt/kvm/kvm_main.c
index dfbaafbe3a00991bd40e21bc19dcf606ec553e4a..92c50dc159e8700b34d2a855d000b74c52fa2dd4 100644
@@ -526,7 +526,7 @@ typedef void (*on_unlock_fn_t)(struct kvm *kvm);
 struct kvm_hva_range {
        unsigned long start;
        unsigned long end;
-       pte_t pte;
+       union kvm_mmu_notifier_arg arg;
        hva_handler_t handler;
        on_lock_fn_t on_lock;
        on_unlock_fn_t on_unlock;
@@ -547,6 +547,8 @@ static void kvm_null_fn(void)
 }
 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
 
+static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
+
 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)         \
        for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
@@ -591,7 +593,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                         * bother making these conditional (to avoid writes on
                         * the second or later invocation of the handler).
                         */
-                       gfn_range.pte = range->pte;
+                       gfn_range.arg = range->arg;
                        gfn_range.may_block = range->may_block;
 
                        /*
@@ -632,14 +634,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                                                unsigned long start,
                                                unsigned long end,
-                                               pte_t pte,
+                                               union kvm_mmu_notifier_arg arg,
                                                hva_handler_t handler)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        const struct kvm_hva_range range = {
                .start          = start,
                .end            = end,
-               .pte            = pte,
+               .arg            = arg,
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
                .on_unlock      = (void *)kvm_null_fn,
@@ -659,7 +661,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
        const struct kvm_hva_range range = {
                .start          = start,
                .end            = end,
-               .pte            = __pte(0),
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
                .on_unlock      = (void *)kvm_null_fn,
@@ -693,6 +694,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        pte_t pte)
 {
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       const union kvm_mmu_notifier_arg arg = { .pte = pte };
 
        trace_kvm_set_spte_hva(address);
 
@@ -708,7 +710,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
        if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
                return;
 
-       kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn);
+       kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
 }
 
 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
@@ -747,7 +749,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        const struct kvm_hva_range hva_range = {
                .start          = range->start,
                .end            = range->end,
-               .pte            = __pte(0),
                .handler        = kvm_unmap_gfn_range,
                .on_lock        = kvm_mmu_invalidate_begin,
                .on_unlock      = kvm_arch_guest_memory_reclaimed,
@@ -812,7 +813,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
        const struct kvm_hva_range hva_range = {
                .start          = range->start,
                .end            = range->end,
-               .pte            = __pte(0),
                .handler        = (void *)kvm_null_fn,
                .on_lock        = kvm_mmu_invalidate_end,
                .on_unlock      = (void *)kvm_null_fn,
@@ -845,7 +845,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 {
        trace_kvm_age_hva(start, end);
 
-       return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
+       return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
+                                   kvm_age_gfn);
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,