Revert "arm64: initialize per-cpu offsets earlier"
Author:     Will Deacon <will@kernel.org>
AuthorDate: Fri, 9 Oct 2020 10:24:17 +0000 (11:24 +0100)
Commit:     Will Deacon <will@kernel.org>
CommitDate: Fri, 9 Oct 2020 10:24:17 +0000 (11:24 +0100)
This reverts commit 353e228eb355be5a65a3c0996c774a0f46737fda.

Qian Cai reports that a Cavium ThunderX2 (TX2) machine no longer boots
with his .config, as it appears that task_cpu() gets instrumented and
called before KASAN has been initialised.

Although Mark Rutland has a proposed fix, let's take the safe option of
reverting this for now and sorting it out properly later.
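
For reference, the reverted helper is visible in the setup.c hunk below.
What follows is a minimal sketch of the ordering hazard, with the head.S
call order condensed into comments and the internals of task_cpu() left
out; it illustrates the failure mode described above rather than being a
verbatim reconstruction:

/*
 * Before the revert, __primary_switched in head.S did:
 *
 *     bl      init_this_cpu_offset    // ran first ...
 * #ifdef CONFIG_KASAN
 *     bl      kasan_early_init        // ... but shadow is set up here
 * #endif
 */
void noinstr init_this_cpu_offset(void)
{
	/*
	 * noinstr keeps this function itself out of the instrumented
	 * section, but with some configs task_cpu() still ends up
	 * instrumented: its read of current's CPU field is checked
	 * against KASAN shadow that has not been initialised yet, and
	 * the machine from the report hangs at boot.
	 */
	unsigned int cpu = task_cpu(current);

	set_my_cpu_offset(per_cpu_offset(cpu));
}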

Link: https://lore.kernel.org/r/711bc57a314d8d646b41307008db2845b7537b3d.camel@redhat.com
Reported-by: Qian Cai <cai@redhat.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/cpu.h
arch/arm64/kernel/head.S
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c

diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index d9d60b1..7faae6f 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -68,6 +68,4 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
                                 struct cpuinfo_arm64 *boot);
 
-void init_this_cpu_offset(void);
-
 #endif /* __ASM_CPU_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e28c9d4..d8d9caf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -448,8 +448,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
        bl      __pi_memset
        dsb     ishst                           // Make zero page visible to PTW
 
-       bl      init_this_cpu_offset
-
 #ifdef CONFIG_KASAN
        bl      kasan_early_init
 #endif
@@ -756,7 +754,6 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
        ptrauth_keys_init_cpu x2, x3, x4, x5
 #endif
 
-       bl      init_this_cpu_offset
        b       secondary_start_kernel
 SYM_FUNC_END(__secondary_switched)
 
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0051719..77c4c9b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -87,6 +87,12 @@ void __init smp_setup_processor_id(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        set_cpu_logical_map(0, mpidr);
 
+       /*
+        * clear __my_cpu_offset on boot CPU to avoid hang caused by
+        * using percpu variable early, for example, lockdep will
+        * access percpu variable inside lock_release
+        */
+       set_my_cpu_offset(0);
        pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
                (unsigned long)mpidr, read_cpuid_id());
 }
@@ -276,12 +282,6 @@ u64 cpu_logical_map(int cpu)
 }
 EXPORT_SYMBOL_GPL(cpu_logical_map);
 
-void noinstr init_this_cpu_offset(void)
-{
-       unsigned int cpu = task_cpu(current);
-       set_my_cpu_offset(per_cpu_offset(cpu));
-}
-
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
        init_mm.start_code = (unsigned long) _text;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 7714310..355ee9e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -192,7 +192,10 @@ asmlinkage notrace void secondary_start_kernel(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        const struct cpu_operations *ops;
-       unsigned int cpu = smp_processor_id();
+       unsigned int cpu;
+
+       cpu = task_cpu(current);
+       set_my_cpu_offset(per_cpu_offset(cpu));
 
        /*
         * All kernel threads share the same mm context; grab a
@@ -432,13 +435,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
 {
-       /*
-        * Now that setup_per_cpu_areas() has allocated the runtime per-cpu
-        * areas it is only safe to read the CPU0 boot-time area, and we must
-        * reinitialize the offset to point to the runtime area.
-        */
-       init_this_cpu_offset();
-
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();
 
        /*
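
Taken together, the revert restores the earlier arrangement in which
each CPU establishes its own per-cpu offset from C, at the point where
the prerequisites for doing so are met. A condensed sketch of that
ordering, assembled from the hunks above (bodies abbreviated with ...):

/* Boot CPU, very early (setup.c): start from offset 0 so that early
 * per-cpu users, e.g. lockdep inside lock_release(), hit the
 * boot-time per-cpu area instead of a junk offset. */
void __init smp_setup_processor_id(void)
{
	...
	set_my_cpu_offset(0);
	...
}

/* Boot CPU, once setup_per_cpu_areas() has allocated the runtime
 * per-cpu areas (smp.c): re-point the offset at the runtime area. */
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	...
}

/* Secondary CPUs (smp.c): set the offset first thing in C; by the
 * time any secondary runs this, KASAN has long been initialised, so
 * an instrumented task_cpu() is harmless here. */
asmlinkage notrace void secondary_start_kernel(void)
{
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));
	...
}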