From 1b2d3451ee50a0968cb9933f726e50b368ba5073 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 14 Feb 2022 16:52:16 +0000
Subject: [PATCH] arm64: Support PREEMPT_DYNAMIC

This patch enables support for PREEMPT_DYNAMIC on arm64, allowing the
preemption model to be chosen at boot time.

Specifically, this patch selects HAVE_PREEMPT_DYNAMIC_KEY, so that each
preemption function is an out-of-line call with an early return
depending upon a static key. This leaves almost all the codegen up to
the compiler, and side-steps a number of pain points with static calls
(e.g. interaction with CFI schemes). This should have no worse overhead
than using non-inline static calls, as those use out-of-line trampolines
with early returns.

For example, the dynamic_cond_resched() wrapper looks as follows when
enabled. When disabled, the first `B` is replaced with a `NOP`,
resulting in an early return.

| <dynamic_cond_resched>:
|        bti     c
|        b       <dynamic_cond_resched+0x10>     // or `nop`
|        mov     w0, #0x0
|        ret
|        mrs     x0, sp_el0
|        ldr     x0, [x0, #8]
|        cbnz    x0, <dynamic_cond_resched+0x8>
|        paciasp
|        stp     x29, x30, [sp, #-16]!
|        mov     x29, sp
|        bl      <preempt_schedule_common>
|        mov     w0, #0x1
|        ldp     x29, x30, [sp], #16
|        autiasp
|        ret

... compared to the regular form of the function:

| <__cond_resched>:
|        bti     c
|        mrs     x0, sp_el0
|        ldr     x1, [x0, #8]
|        cbz     x1, <__cond_resched+0x18>
|        mov     w0, #0x0
|        ret
|        paciasp
|        stp     x29, x30, [sp, #-16]!
|        mov     x29, sp
|        bl      <preempt_schedule_common>
|        mov     w0, #0x1
|        ldp     x29, x30, [sp], #16
|        autiasp
|        ret

Since arm64 does not yet use the generic entry code, we must define our
own `sk_dynamic_irqentry_exit_cond_resched`, which will be
enabled/disabled by the common code in kernel/sched/core.c. All other
preemption functions and associated static keys are defined there.
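At the C level, the wrapper producing the codegen above has roughly the
following shape (a sketch modelled on the HAVE_PREEMPT_DYNAMIC_KEY
wrappers in kernel/sched/core.c; exact names and annotations may differ
across kernel versions):

| static DEFINE_STATIC_KEY_TRUE(sk_dynamic_cond_resched);
|
| int __sched dynamic_cond_resched(void)
| {
|         /*
|          * With the key disabled, this folds to the early
|          * `mov w0, #0x0; ret` seen in the disassembly above.
|          */
|         if (!static_branch_unlikely(&sk_dynamic_cond_resched))
|                 return 0;
|         return __cond_resched();
| }

The leading `B`/`NOP` in the disassembly is the jump-label instruction
that static_branch_unlikely() patches at runtime, so switching
preemption models costs one instruction rewrite per wrapper rather than
a trampoline call.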
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-8-mark.rutland@arm.com
---
 arch/arm64/Kconfig               |  1 +
 arch/arm64/include/asm/preempt.h | 19 +++++++++++++++++--
 arch/arm64/kernel/entry-common.c | 10 +++++++++-
 3 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6978140..7d0f0cd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -192,6 +192,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
 	select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index e83f098..0159b62 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -2,6 +2,7 @@
 #ifndef __ASM_PREEMPT_H
 #define __ASM_PREEMPT_H
 
+#include <linux/jump_label.h>
 #include <linux/thread_info.h>
 
 #define PREEMPT_NEED_RESCHED	BIT(32)
@@ -80,10 +81,24 @@ static inline bool should_resched(int preempt_offset)
 }
 
 #ifdef CONFIG_PREEMPTION
+
 void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
-#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_preempt_schedule(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* CONFIG_PREEMPT_DYNAMIC */
+
+#define __preempt_schedule() preempt_schedule()
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 #endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 2c639b6..675352e 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -220,9 +220,17 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
 		lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#define need_irq_preemption() \
+	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+#else
+#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
+#endif
+
 static void __sched arm64_preempt_schedule_irq(void)
 {
-	if (!IS_ENABLED(CONFIG_PREEMPTION))
+	if (!need_irq_preemption())
 		return;
 
 	/*
-- 
2.7.4
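For context, the boot-time selection (e.g. `preempt=full` on the kernel
command line) is applied by the common code in kernel/sched/core.c
flipping these keys. A rough sketch of that machinery, assuming helpers
of the form used by the PREEMPT_DYNAMIC static-key support (details may
differ across versions):

| #define preempt_dynamic_enable(f)  static_key_enable(&sk_dynamic_##f.key)
| #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
|
| /* e.g. selecting the "full" model enables IRQ-exit preemption: */
| preempt_dynamic_enable(irqentry_exit_cond_resched);
| /* ... while "none"/"voluntary" disable it: */
| preempt_dynamic_disable(irqentry_exit_cond_resched);

With the key disabled, need_irq_preemption() above evaluates false and
arm64_preempt_schedule_irq() returns early, matching the behaviour of a
!CONFIG_PREEMPTION kernel.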