From 8e334d729bc4787f728e9e5abc91649f131124ff Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 20 May 2021 12:50:30 +0100
Subject: [PATCH] arm64: smp: unify task and sp setup

Once we enable the MMU, we have to initialize:

 * SP_EL0 to point at the active task
 * SP to point at the active task's stack
 * SCS_SP to point at the active task's shadow stack

For all tasks (including init_task), this information can be derived
from the task's task_struct.

Let's unify __primary_switched and __secondary_switched to consistently
acquire this information from the relevant task_struct.

At the same time, let's fold this together with initializing a task's
final frame.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: James Morse
Cc: Marc Zyngier
Cc: Suzuki Poulose
Cc: Will Deacon
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20210520115031.18509-6-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/head.S | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 9be95e1..e83b289 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -395,15 +395,24 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 SYM_FUNC_END(__create_page_tables)
 
 /*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
  * Create a final frame record at task_pt_regs(current)->stackframe, so
  * that the unwinder can identify the final frame record of any task by
  * its location in the task stack. We reserve the entire pt_regs space
  * for consistency with user tasks and kthreads.
  */
-	.macro setup_final_frame
+	.macro init_cpu_task tsk, tmp
+	msr	sp_el0, \tsk
+
+	ldr	\tmp, [\tsk, #TSK_STACK]
+	add	sp, \tmp, #THREAD_SIZE
 	sub	sp, sp, #PT_REGS_SIZE
+
 	stp	xzr, xzr, [sp, #S_STACKFRAME]
 	add	x29, sp, #S_STACKFRAME
+
+	scs_load \tsk, \tmp
 	.endm
 
 /*
@@ -412,22 +421,16 @@ SYM_FUNC_END(__create_page_tables)
  * x0 = __PHYS_OFFSET
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
-	adrp	x4, init_thread_union
-	add	sp, x4, #THREAD_SIZE
-	adr_l	x5, init_task
-	msr	sp_el0, x5			// Save thread_info
+	adr_l	x4, init_task
+	init_cpu_task x4, x5
 
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
 	isb
 
-	stp	xzr, x30, [sp, #-16]!
+	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp
 
-#ifdef CONFIG_SHADOW_CALL_STACK
-	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
-#endif
-
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer
 
 	ldr_l	x4, kimage_vaddr		// Save the offset between
@@ -459,8 +462,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 0:
 #endif
 	bl	switch_to_vhe			// Prefer VHE if possible
-	add	sp, sp, #16
-	setup_final_frame
+	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
 SYM_FUNC_END(__primary_switched)
@@ -648,12 +650,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ldr	x2, [x0, #CPU_BOOT_TASK]
 	cbz	x2, __secondary_too_slow
 
-	ldr	x1, [x2, #TSK_STACK]
-	add	sp, x1, #THREAD_SIZE
-
-	msr	sp_el0, x2
-	scs_load x2, x3
-	setup_final_frame
+	init_cpu_task x2, x1
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 	ptrauth_keys_init_cpu x2, x3, x4, x5
-- 
2.7.4
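
As a reference for the stack layout established above, here is a minimal,
self-contained C sketch (not kernel code) of the arithmetic that
init_cpu_task performs: SP starts PT_REGS_SIZE below the top of the task
stack, and the final frame record (fp = lr = 0) is written at offset
S_STACKFRAME within that reserved pt_regs area, with x29 then pointing at
it. The constant values and variable names below are illustrative
stand-ins, not the real asm-offsets values.

/*
 * Illustrative stand-alone model of init_cpu_task's stack setup.
 * THREAD_SIZE, PT_REGS_SIZE and S_STACKFRAME are assumed placeholder
 * values chosen only so that S_STACKFRAME + 16 fits inside PT_REGS_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE   (16 * 1024)  /* stand-in for the arm64 stack size */
#define PT_REGS_SIZE  336          /* stand-in for sizeof(struct pt_regs) */
#define S_STACKFRAME  304          /* stand-in offset of pt_regs.stackframe */

static uint8_t task_stack[THREAD_SIZE] __attribute__((aligned(16)));

int main(void)
{
	/* ldr \tmp, [\tsk, #TSK_STACK]; add sp, \tmp, #THREAD_SIZE */
	uintptr_t stack_top = (uintptr_t)task_stack + THREAD_SIZE;

	/* sub sp, sp, #PT_REGS_SIZE */
	uintptr_t sp = stack_top - PT_REGS_SIZE;

	/* stp xzr, xzr, [sp, #S_STACKFRAME]; add x29, sp, #S_STACKFRAME */
	uint64_t *frame = (uint64_t *)(sp + S_STACKFRAME);
	frame[0] = 0;		/* fp slot of the final frame record */
	frame[1] = 0;		/* lr slot of the final frame record */

	printf("initial sp        = %#lx\n", (unsigned long)sp);
	printf("final frame (x29) = %#lx\n", (unsigned long)(uintptr_t)frame);
	return 0;
}

Building and running the sketch only prints the two derived addresses; it
is meant solely to make the address arithmetic concrete, not to mirror the
kernel's actual data structures.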