KVM: arm64: nvhe: Synchronise with page table walker on TLBI
author Marc Zyngier <maz@kernel.org>
Sat, 8 Apr 2023 16:04:24 +0000 (17:04 +0100)
committer Marc Zyngier <maz@kernel.org>
Fri, 14 Apr 2023 07:23:29 +0000 (08:23 +0100)
A TLBI from EL2 impacting EL1 involves messing with the EL1&0
translation regime, and the page table walker may still be
performing speculative walks.

Piggyback on the existing DSBs to always have a DSB ISH that
will synchronise all load/store operations that the PTW may
still have in flight.
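
For reference, the dsb() helper used throughout this file expands its
domain argument directly into the corresponding barrier instruction,
so dsb(ish) and dsb(nsh) emit DSB ISH and DSB NSH respectively. A
simplified sketch of the arm64 macro (see arch/arm64/include/asm/barrier.h):

	#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")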

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
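
The diff below folds the caller-side dsb(ishst) into __tlb_switch_to_guest(),
which now takes an 'nsh' flag selecting the barrier domain; every caller in
this patch passes false (Inner Shareable). Purely for illustration, a caller
that only needed to invalidate on the local CPU could pass true; a minimal
sketch under that assumption (the function name and TLBI choice here are
hypothetical and not part of this patch):

	static void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
	{
		struct tlb_inv_context cxt;

		/* nsh == true: the helper issues dsb(nsh) rather than dsb(ish) */
		__tlb_switch_to_guest(mmu, &cxt, true);

		__tlbi(vmalle1);	/* EL1&0 regime, this PE only */
		dsb(nsh);
		isb();

		__tlb_switch_to_host(&cxt);
	}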
arch/arm64/kvm/hyp/nvhe/tlb.c

index d296d61..9781791 100644
@@ -15,8 +15,31 @@ struct tlb_inv_context {
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-                                 struct tlb_inv_context *cxt)
+                                 struct tlb_inv_context *cxt,
+                                 bool nsh)
 {
+       /*
+        * We have two requirements:
+        *
+        * - ensure that the page table updates are visible to all
+        *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
+        *   being either ish or nsh, depending on the invalidation
+        *   type.
+        *
+        * - complete any speculative page table walk started before
+        *   we trapped to EL2 so that we can mess with the MM
+        *   registers out of context, for which dsb(nsh) is enough
+        *
+        * The composition of these two barriers is a dsb(DOMAIN), and
+        * the 'nsh' parameter tracks the distinction between
+        * Inner-Shareable and Non-Shareable, as specified by the
+        * callers.
+        */
+       if (nsh)
+               dsb(nsh);
+       else
+               dsb(ish);
+
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;
 
@@ -60,10 +83,8 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 {
        struct tlb_inv_context cxt;
 
-       dsb(ishst);
-
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       __tlb_switch_to_guest(mmu, &cxt, false);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -113,10 +134,8 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
        struct tlb_inv_context cxt;
 
-       dsb(ishst);
-
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       __tlb_switch_to_guest(mmu, &cxt, false);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
@@ -130,7 +149,7 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       __tlb_switch_to_guest(mmu, &cxt, false);
 
        __tlbi(vmalle1);
        asm volatile("ic iallu");
@@ -142,7 +161,8 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 
 void __kvm_flush_vm_context(void)
 {
-       dsb(ishst);
+       /* Same remark as in __tlb_switch_to_guest() */
+       dsb(ish);
        __tlbi(alle1is);
 
        /*