ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
author Marc Zyngier <marc.zyngier@arm.com>
Wed, 7 Nov 2018 16:43:49 +0000 (11:43 -0500)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Nov 2018 07:20:38 +0000 (08:20 +0100)
Commit 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f upstream.

In order to avoid aliasing attacks against the branch predictor,
let's invalidate the BTB on guest exit. This is made complicated
by the fact that we cannot take a branch before invalidating the
BTB.
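
To make the trick concrete: each vector slot in the new table is a
single "add sp, sp, #1", so by the time the straight-line code reaches
BPIALL, the low three bits of the (8-byte aligned) HYP SP encode which
slot was entered, and no branch has been taken yet. A small host-side
C simulation of that encoding (illustrative only, not part of the
patch; slot names mirror the comments below):

    #include <stdio.h>

    static const char *const slots[8] = {
        "reset", "undef", "svc", "pabt", "dabt", "hvc", "irq", "fiq",
    };

    int main(void)
    {
        unsigned long sp = 0xffff0ff8ul;  /* any 8-byte aligned HYP SP */

        for (int i = 0; i < 8; i++) {
            /* Entering at slot i falls through the remaining (7 - i)
             * "add sp, sp, #1" instructions; the FIQ slot only runs
             * the nop, so SP gains (7 - i) with no branch taken. */
            unsigned long enc = (sp + (7 - i)) & 7;

            printf("entry via %-5s -> SP & 7 == %lu\n", slots[i], enc);
        }
        return 0;
    }

This prints 7 for reset down to 0 for FIQ, exactly the values the
vect_br invocations in the patch dispatch on.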

We only apply this to A12 and A17, which are the only two ARM
cores on which this is useful.
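
For reference, the gating is a plain MIDR part-number compare. A
standalone sketch of the check (the mask and the two part values
follow the kernel's cputype.h encodings; read_cpuid_part() is redone
here to take the MIDR as an argument rather than reading it from
CP15):

    #include <stdio.h>

    #define ARM_CPU_PART_MASK        0xff00fff0u
    #define ARM_CPU_PART_CORTEX_A12  0x4100c0d0u
    #define ARM_CPU_PART_CORTEX_A17  0x4100c0e0u

    /* Keep implementer + architecture + part number; drop the
     * variant and revision fields of the MIDR. */
    static unsigned int read_cpuid_part(unsigned int midr)
    {
        return midr & ARM_CPU_PART_MASK;
    }

    int main(void)
    {
        unsigned int midr = 0x411fc0e2u;  /* e.g. a Cortex-A17 r1p2 */

        switch (read_cpuid_part(midr)) {
        case ARM_CPU_PART_CORTEX_A12:
        case ARM_CPU_PART_CORTEX_A17:
            printf("BP-invalidating vectors selected\n");
            break;
        default:
            printf("stock vectors selected\n");
        }
        return 0;
    }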

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Boot-tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/hyp/hyp-entry.S

diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 8ef05381984b1b6ba977035c82607423b37835c0..24f3ec7c9fbe0b2c40f2af0aa883f5c5a2bb90b3 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index e2f05cedaf97837ea97d44636d13e89b3c3f91a8..625edef2a54f46aa5daa477c72ae25bdcb858370 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -248,7 +248,22 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 
 static inline void *kvm_get_hyp_vector(void)
 {
-       return kvm_ksym_ref(__kvm_hyp_vector);
+       switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       case ARM_CPU_PART_CORTEX_A12:
+       case ARM_CPU_PART_CORTEX_A17:
+       {
+               extern char __kvm_hyp_vector_bp_inv[];
+               return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+       }
+
+#endif
+       default:
+       {
+               extern char __kvm_hyp_vector[];
+               return kvm_ksym_ref(__kvm_hyp_vector);
+       }
+       }
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 96beb53934c9769d13f7c58f5e574a065638c42c..58ec002721a1845ab858b9c2f72bbc5372487b14 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
        W(b)    hyp_irq
        W(b)    hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       .align 5
+__kvm_hyp_vector_bp_inv:
+       .global __kvm_hyp_vector_bp_inv
+
+       /*
+        * We encode the exception entry in the bottom 3 bits of
+        * SP, and we have to guarantee that SP is 8-byte aligned.
+        */
+       W(add)  sp, sp, #1      /* Reset          7 */
+       W(add)  sp, sp, #1      /* Undef          6 */
+       W(add)  sp, sp, #1      /* Syscall        5 */
+       W(add)  sp, sp, #1      /* Prefetch abort 4 */
+       W(add)  sp, sp, #1      /* Data abort     3 */
+       W(add)  sp, sp, #1      /* HVC            2 */
+       W(add)  sp, sp, #1      /* IRQ            1 */
+       W(nop)                  /* FIQ            0 */
+
+       mcr     p15, 0, r0, c7, c5, 6   /* BPIALL */
+       isb
+
+#ifdef CONFIG_THUMB2_KERNEL
+       /*
+        * Yet another silly hack: Use VPIDR as a temp register.
+        * Thumb2 is really a pain, as SP cannot be used with most
+        * of the bitwise instructions. The vect_br macro ensures
+        * things get cleaned up.
+        */
+       mcr     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mov     r0, sp
+       and     r0, r0, #7
+       sub     sp, sp, r0
+       push    {r1, r2}
+       mov     r1, r0
+       mrc     p15, 4, r0, c0, c0, 0   /* VPIDR */
+       mrc     p15, 0, r2, c0, c0, 0   /* MIDR  */
+       mcr     p15, 4, r2, c0, c0, 0   /* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(   eor     sp, sp, #\val   )
+ARM(   tst     sp, #7          )
+ARM(   eorne   sp, sp, #\val   )
+
+THUMB( cmp     r1, #\val       )
+THUMB( popeq   {r1, r2}        )
+
+       beq     \targ
+.endm
+
+       vect_br 0, hyp_fiq
+       vect_br 1, hyp_irq
+       vect_br 2, hyp_hvc
+       vect_br 3, hyp_dabt
+       vect_br 4, hyp_pabt
+       vect_br 5, hyp_svc
+       vect_br 6, hyp_undef
+       vect_br 7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
        .align
 \label:        mov     r0, #\cause
@@ -131,7 +191,14 @@ hyp_hvc:
        mrceq   p15, 4, r0, c12, c0, 0  @ get HVBAR
        beq     1f
 
-       push    {lr}
+       /*
+        * Pushing r2 here is just a way of keeping the stack aligned to
+        * 8 bytes on any path that can trigger a HYP exception. Here,
+        * we may well be about to jump into the guest, and the guest
+        * exit would otherwise be badly decoded by our fancy
+        * "decode-exception-without-a-branch" code...
+        */
+       push    {r2, lr}
 
        mov     lr, r0
        mov     r0, r1
@@ -141,7 +208,7 @@ hyp_hvc:
 THUMB( orr     lr, #1)
        blx     lr                      @ Call the HYP function
 
-       pop     {lr}
+       pop     {r2, lr}
 1:     eret
 
 guest_trap: