arm64: smp: Don't enter kernel with NULL stack pointer or task struct
author: Will Deacon <will@kernel.org>
Tue, 27 Aug 2019 13:36:38 +0000 (14:36 +0100)
committer: Will Deacon <will@kernel.org>
Tue, 27 Aug 2019 16:37:02 +0000 (17:37 +0100)
Although SMP bringup is inherently racy, we can significantly reduce
the window during which secondary CPUs can unexpectedly enter the
kernel by sanity checking the 'stack' and 'task' fields of the
'secondary_data' structure. If the booting CPU gave up waiting for us,
then they will have been cleared to NULL and we should spin in a WFE; WFI
loop instead.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/head.S
arch/arm64/kernel/smp.c

index 2cdacd1..0baadf3 100644 (file)
@@ -724,14 +724,22 @@ __secondary_switched:
 
        adr_l   x0, secondary_data
        ldr     x1, [x0, #CPU_BOOT_STACK]       // get secondary_data.stack
+       cbz     x1, __secondary_too_slow
        mov     sp, x1
        ldr     x2, [x0, #CPU_BOOT_TASK]
+       cbz     x2, __secondary_too_slow
        msr     sp_el0, x2
        mov     x29, #0
        mov     x30, #0
        b       secondary_start_kernel
 ENDPROC(__secondary_switched)
 
+__secondary_too_slow:
+       wfe
+       wfi
+       b       __secondary_too_slow
+ENDPROC(__secondary_too_slow)
+
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
  * with MMU turned off.
index 63c7a76..1f8aeb7 100644 (file)
@@ -136,6 +136,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
+       __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (ret && status) {