arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations
author Mark Brown <broonie@kernel.org>
Tue, 18 Feb 2020 19:58:39 +0000 (19:58 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
Mon, 9 Mar 2020 17:35:43 +0000 (17:35 +0000)
In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC with separate annotations for standard C callable functions, for
data, and for code with non-standard calling conventions.
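
As a rough sketch of the naming change (illustrative only; the SYM_*
macro names come from include/linux/linkage.h and "example_func" is a
made-up symbol):

	// old style: one pair of annotations for everything
	ENTRY(example_func)
		ret
	ENDPROC(example_func)

	// new style: the annotation names the kind of symbol
	SYM_FUNC_START(example_func)	// standard C callable function
		ret
	SYM_FUNC_END(example_func)

	SYM_CODE_START(example_vecs)	// code with a non-standard calling convention
		...
	SYM_CODE_END(example_vecs)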

Using these for __smccc_workaround_1_smc is more involved than for most
symbols as this symbol is annotated quite unusually: rather than just
having the explicit symbol, we define _start and _end symbols which are
then used to compute the length. This does not play at all nicely with
the new style macros. Instead, define a constant for the size of the
function and use that both in the C code and for .org based size checks
in the assembly code.
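
For reference, the .org based size check added below works roughly as
follows (a sketch under GNU assembler semantics; "func" and "SZ" are
stand-ins for __smccc_workaround_1_smc and __SMCCC_WORKAROUND_1_SMC_SZ):

	SYM_CODE_START(func)
		...			// the workaround instructions
	1:	.org func + SZ		// errors if the code is larger than SZ
		.org 1b			// errors if the code is smaller than SZ,
					// as .org cannot move backwards
	SYM_CODE_END(func)

Together the two directives make the assembler verify that the function
body is exactly SZ bytes, matching the array bound the C code sees.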

Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kvm/hyp/hyp-entry.S

index 44a2437..7c7eeea 100644 (file)
@@ -36,6 +36,8 @@
  */
 #define KVM_VECTOR_PREAMBLE    (2 * AARCH64_INSN_SIZE)
 
+#define __SMCCC_WORKAROUND_1_SMC_SZ 36
+
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
@@ -75,6 +77,8 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
+extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
+
 /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
 #define __hyp_this_cpu_ptr(sym)                                                \
        ({                                                              \
index 0af2201..6a2ca33 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 
 static bool __maybe_unused
@@ -113,9 +114,6 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
-
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
 {
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
        raw_spin_unlock(&bp_lock);
 }
 #else
-#define __smccc_workaround_1_smc_start         NULL
-#define __smccc_workaround_1_smc_end           NULL
-
 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
                smccc_end = NULL;
                break;
 
+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
        case SMCCC_CONDUIT_SMC:
                cb = call_smc_arch_workaround_1;
-               smccc_start = __smccc_workaround_1_smc_start;
-               smccc_end = __smccc_workaround_1_smc_end;
+               smccc_start = __smccc_workaround_1_smc;
+               smccc_end = __smccc_workaround_1_smc +
+                       __SMCCC_WORKAROUND_1_SMC_SZ;
                break;
+#endif
 
        default:
                return -1;
index 1e2ab92..c2a13ab 100644 (file)
@@ -322,7 +322,7 @@ SYM_CODE_END(__bp_harden_hyp_vecs)
 
        .popsection
 
-ENTRY(__smccc_workaround_1_smc_start)
+SYM_CODE_START(__smccc_workaround_1_smc)
        esb
        sub     sp, sp, #(8 * 4)
        stp     x2, x3, [sp, #(8 * 0)]
@@ -332,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start)
        ldp     x2, x3, [sp, #(8 * 0)]
        ldp     x0, x1, [sp, #(8 * 2)]
        add     sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+1:     .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+       .org 1b
+SYM_CODE_END(__smccc_workaround_1_smc)
 #endif