KVM: arm/arm64: vgic-its: Cache successful MSI->LPI translation
author    Marc Zyngier <maz@kernel.org>
          Mon, 18 Mar 2019 10:17:39 +0000 (10:17 +0000)
committer Marc Zyngier <maz@kernel.org>
          Sun, 18 Aug 2019 17:38:52 +0000 (18:38 +0100)
On a successful translation, preserve the parameters in the LPI
translation cache. Each translation is reusing the last slot
in the list, naturally evicting the least recently used entry.

Tested-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
virt/kvm/arm/vgic/vgic-its.c

index d3e90a9d0a7a4a66bac2c549cdc536fd8d1d57ef..e61d3ea0ab40c4ef6a42a1e54df808b240751cf1 100644 (file)
@@ -535,6 +535,90 @@ static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
        return 0;
 }
 
+/*
+ * Look up a cached devid/eventid translation for the given doorbell
+ * address.  The caller must hold dist->lpi_list_lock (see
+ * vgic_its_cache_translation(), which calls this under that lock).
+ *
+ * Returns the cached vgic_irq on a hit, after moving the entry to the
+ * head of the list so the list stays ordered most- to least-recently
+ * used; returns NULL on a miss.
+ */
+static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
+                                              phys_addr_t db,
+                                              u32 devid, u32 eventid)
+{
+       struct vgic_translation_cache_entry *cte;
+
+       list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
+               /*
+                * If we hit a NULL entry, there is nothing after this
+                * point: unused entries are kept at the tail, so the
+                * rest of the list is empty too.
+                */
+               if (!cte->irq)
+                       break;
+
+               /* All three keys must match for a cache hit */
+               if (cte->db != db || cte->devid != devid ||
+                   cte->eventid != eventid)
+                       continue;
+
+               /*
+                * Move this entry to the head, as it is the most
+                * recently used.
+                */
+               if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
+                       list_move(&cte->entry, &dist->lpi_translation_cache);
+
+               return cte->irq;
+       }
+
+       return NULL;
+}
+
+/*
+ * Cache a successful devid/eventid -> LPI translation, keyed by the
+ * doorbell (GITS_TRANSLATER) address of the ITS that performed it.
+ *
+ * Directly injected (HW) interrupts are never cached.  Under
+ * dist->lpi_list_lock: bail out if the cache list is empty, re-check
+ * for a concurrent insertion of the same translation, then recycle
+ * the least recently used entry (the list tail), fix up the interrupt
+ * reference counts, and move the entry to the head of the list.
+ */
+static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
+                                      u32 devid, u32 eventid,
+                                      struct vgic_irq *irq)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct vgic_translation_cache_entry *cte;
+       unsigned long flags;
+       phys_addr_t db;
+
+       /* Do not cache a directly injected interrupt */
+       if (irq->hw)
+               return;
+
+       raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
+       /* No allocated cache entries means nothing to recycle */
+       if (unlikely(list_empty(&dist->lpi_translation_cache)))
+               goto out;
+
+       /*
+        * We could have raced with another CPU caching the same
+        * translation behind our back, so let's check that it is not
+        * already in the cache.
+        */
+       db = its->vgic_its_base + GITS_TRANSLATER;
+       if (__vgic_its_check_cache(dist, db, devid, eventid))
+               goto out;
+
+       /* Always reuse the last entry (LRU policy) */
+       cte = list_last_entry(&dist->lpi_translation_cache,
+                             typeof(*cte), entry);
+
+       /*
+        * Caching the translation implies having an extra reference
+        * to the interrupt, so drop the potential reference on what
+        * was in the cache, and increment it on the new interrupt.
+        */
+       if (cte->irq)
+               __vgic_put_lpi_locked(kvm, cte->irq);
+
+       vgic_get_irq_kref(irq);
+
+       cte->db         = db;
+       cte->devid      = devid;
+       cte->eventid    = eventid;
+       cte->irq        = irq;
+
+       /* Move the new translation to the head of the list */
+       list_move(&cte->entry, &dist->lpi_translation_cache);
+
+out:
+       raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+}
+
 void vgic_its_invalidate_cache(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
@@ -578,6 +662,8 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
        if (!vcpu->arch.vgic_cpu.lpis_enabled)
                return -EBUSY;
 
+       vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
+
        *irq = ite->irq;
        return 0;
 }