arm64: alternative: Allow alternative status checking per cpufeature
authorJulien Thierry <julien.thierry@arm.com>
Thu, 31 Jan 2019 14:58:52 +0000 (14:58 +0000)
committerCatalin Marinas <catalin.marinas@arm.com>
Wed, 6 Feb 2019 10:05:20 +0000 (10:05 +0000)
In preparation for the application of alternatives at different points
during the boot process, provide the possibility to check whether
alternatives for a feature of interest were already applied, instead of
having a global boolean for all alternatives.

Make VHE enablement code check for the VHE feature instead of considering
all alternatives.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Marc Zyngier <Marc.Zyngier@arm.com>
Cc: Christoffer Dall <Christoffer.Dall@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/alternative.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpufeature.c

index 4b650ec..9806a23 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/stddef.h>
 #include <linux/stringify.h>
 
-extern int alternatives_applied;
-
 struct alt_instr {
        s32 orig_offset;        /* offset to original instruction */
        s32 alt_offset;         /* offset to replacement instruction */
@@ -28,6 +26,7 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
                                 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
+bool alternative_is_applied(u16 cpufeature);
 
 #ifdef CONFIG_MODULES
 void apply_alternatives_module(void *start, size_t length);
index b5d6039..c947d22 100644 (file)
 #define ALT_ORIG_PTR(a)                __ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)                __ALT_PTR(a, alt_offset)
 
-int alternatives_applied;
+static int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
 
 struct alt_region {
        struct alt_instr *begin;
        struct alt_instr *end;
 };
 
+bool alternative_is_applied(u16 cpufeature)
+{
+       if (WARN_ON(cpufeature >= ARM64_NCAPS))
+               return false;
+
+       return test_bit(cpufeature, applied_alternatives);
+}
+
 /*
  * Check if the target PC is within an alternative block.
  */
@@ -192,6 +202,9 @@ static void __apply_alternatives(void *alt_region, bool is_module)
                dsb(ish);
                __flush_icache_all();
                isb();
+
+               /* We applied all that was available */
+               bitmap_copy(applied_alternatives, cpu_hwcaps, ARM64_NCAPS);
        }
 }
 
@@ -208,14 +221,14 @@ static int __apply_alternatives_multi_stop(void *unused)
 
        /* We always have a CPU 0 at this point (__init) */
        if (smp_processor_id()) {
-               while (!READ_ONCE(alternatives_applied))
+               while (!READ_ONCE(all_alternatives_applied))
                        cpu_relax();
                isb();
        } else {
-               BUG_ON(alternatives_applied);
+               BUG_ON(all_alternatives_applied);
                __apply_alternatives(&region, false);
                /* Barriers provided by the cache flushing */
-               WRITE_ONCE(alternatives_applied, 1);
+               WRITE_ONCE(all_alternatives_applied, 1);
        }
 
        return 0;
index 6f56e0a..d607ea3 100644 (file)
@@ -1118,7 +1118,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
         * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
         * do anything here.
         */
-       if (!alternatives_applied)
+       if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
                write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
 }
 #endif