riscv: stack: Support HAVE_IRQ_EXIT_ON_IRQ_STACK
author Guo Ren <guoren@linux.alibaba.com>
Wed, 14 Jun 2023 01:30:16 +0000 (21:30 -0400)
committer Palmer Dabbelt <palmer@rivosinc.com>
Thu, 22 Jun 2023 17:38:35 +0000 (10:38 -0700)
Add independent per-CPU IRQ stacks to prevent kernel stack overflows.
The IRQ stacks are also compatible with VMAP_STACK, via arch_alloc_vmap_stack().
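
In outline, each CPU gets its own IRQ stack, and do_irq() hops onto it
only when the interrupt arrives on a task stack; an interrupt taken while
already off the task stack keeps the stack it is on. A much-simplified
sketch of that decision (illustrative only, not the patch itself; the real
switch has to be written in inline assembly, as in the traps.c hunk below,
because sp cannot be changed from C):

  /* Illustrative sketch only; see the real do_irq() in the diff below. */
  if (on_thread_stack()) {
          /* Top of this CPU's dedicated IRQ stack. */
          ulong *top = per_cpu(irq_stack_ptr, smp_processor_id())
                          + IRQ_STACK_SIZE / sizeof(ulong);

          /* Switch sp to 'top' (done in asm in the real code), then: */
          handle_riscv_irq(regs);
  } else {
          /* Already off the task stack: handle it in place. */
          handle_riscv_irq(regs);
  }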

Tested-by: Jisheng Zhang <jszhang@kernel.org>
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Cc: Clément Léger <cleger@rivosinc.com>
Link: https://lore.kernel.org/r/20230614013018.2168426-2-guoren@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/Kconfig
arch/riscv/include/asm/irq_stack.h [new file with mode: 0644]
arch/riscv/include/asm/thread_info.h
arch/riscv/kernel/irq.c
arch/riscv/kernel/traps.c

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a3d54cd..a8368fe 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -590,6 +590,13 @@ config FPU
 
          If you don't know what to do here, say Y.
 
+config IRQ_STACKS
+       bool "Independent irq stacks" if EXPERT
+       default y
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK
+       help
+         Add independent irq stacks for percpu to prevent kernel stack overflows.
+
 endmenu # "Platform type"
 
 menu "Kernel features"
diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h
new file mode 100644
index 0000000..e4042d2
--- /dev/null
+++ b/arch/riscv/include/asm/irq_stack.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_IRQ_STACK_H
+#define _ASM_RISCV_IRQ_STACK_H
+
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/kconfig.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <asm/thread_info.h>
+
+DECLARE_PER_CPU(ulong *, irq_stack_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+/*
+ * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
+ * stacks need to have the same alignment.
+ */
+static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
+{
+       void *p;
+
+       p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
+                       __builtin_return_address(0));
+       return kasan_reset_tag(p);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#endif /* _ASM_RISCV_IRQ_STACK_H */
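
The allocator above mirrors how the generic fork code vmallocs task stacks
(same THREAD_ALIGN alignment, same THREADINFO_GFP flags), which is what the
comment about overflow detection is getting at: the VMAP_STACK machinery
assumes every kernel stack it may encounter is aligned like a task stack.
A hedged sanity-check sketch (the helper name is invented here, purely for
illustration):

  /*
   * Hypothetical helper, for illustration only: a vmap'd stack handed to
   * the stack-overflow machinery is expected to be THREAD_ALIGN aligned,
   * just like a vmap'd task stack.
   */
  static inline void check_vmap_stack_alignment(const void *stack_base)
  {
          WARN_ON_ONCE(!IS_ALIGNED((unsigned long)stack_base, THREAD_ALIGN));
  }
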
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 97e6f65..2f32875 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -40,6 +40,8 @@
 #define OVERFLOW_STACK_SIZE     SZ_4K
 #define SHADOW_OVERFLOW_STACK_SIZE (1024)
 
+#define IRQ_STACK_SIZE         THREAD_SIZE
+
 #ifndef __ASSEMBLY__
 
 extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index eb9a68a..a1dcf8e 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -28,6 +28,38 @@ struct fwnode_handle *riscv_get_intc_hwnode(void)
 }
 EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
 
+#ifdef CONFIG_IRQ_STACKS
+#include <asm/irq_stack.h>
+
+DEFINE_PER_CPU(ulong *, irq_stack_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+static void init_irq_stacks(void)
+{
+       int cpu;
+       ulong *p;
+
+       for_each_possible_cpu(cpu) {
+               p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
+               per_cpu(irq_stack_ptr, cpu) = p;
+       }
+}
+#else
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack);
+
+static void init_irq_stacks(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
+}
+#endif /* CONFIG_VMAP_STACK */
+#else
+static void init_irq_stacks(void) {}
+#endif /* CONFIG_IRQ_STACKS */
+
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
        show_ipi_stats(p, prec);
@@ -36,6 +68,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 
 void __init init_IRQ(void)
 {
+       init_irq_stacks();
        irqchip_init();
        if (!handle_arch_irq)
                panic("No interrupt controller found.");
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 05ffdcd..5158961 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -27,6 +27,7 @@
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
 #include <asm/vector.h>
+#include <asm/irq_stack.h>
 
 int show_unhandled_signals = 1;
 
@@ -327,16 +328,46 @@ asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
 }
 #endif
 
-asmlinkage __visible noinstr void do_irq(struct pt_regs *regs)
+static void noinstr handle_riscv_irq(struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
-       irqentry_state_t state = irqentry_enter(regs);
 
        irq_enter_rcu();
        old_regs = set_irq_regs(regs);
        handle_arch_irq(regs);
        set_irq_regs(old_regs);
        irq_exit_rcu();
+}
+
+asmlinkage void noinstr do_irq(struct pt_regs *regs)
+{
+       irqentry_state_t state = irqentry_enter(regs);
+#ifdef CONFIG_IRQ_STACKS
+       if (on_thread_stack()) {
+               ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
+                                       + IRQ_STACK_SIZE/sizeof(ulong);
+               __asm__ __volatile(
+               "addi   sp, sp, -"RISCV_SZPTR  "\n"
+               REG_S"  ra, (sp)                \n"
+               "addi   sp, sp, -"RISCV_SZPTR  "\n"
+               REG_S"  s0, (sp)                \n"
+               "addi   s0, sp, 2*"RISCV_SZPTR "\n"
+               "move   sp, %[sp]               \n"
+               "move   a0, %[regs]             \n"
+               "call   handle_riscv_irq        \n"
+               "addi   sp, s0, -2*"RISCV_SZPTR"\n"
+               REG_L"  s0, (sp)                \n"
+               "addi   sp, sp, "RISCV_SZPTR   "\n"
+               REG_L"  ra, (sp)                \n"
+               "addi   sp, sp, "RISCV_SZPTR   "\n"
+               :
+               : [sp] "r" (sp), [regs] "r" (regs)
+               : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+                 "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+                 "memory");
+       } else
+#endif
+               handle_riscv_irq(regs);
 
        irqentry_exit(regs, state);
 }
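
The inline assembly in the new do_irq() is the stack switch itself; it
cannot be expressed in C because the compiler must not keep live stack
references across the change of sp. Read step by step (commentary only,
restating the asm above, nothing new):

  /*
   * 1. addi sp,sp,-RISCV_SZPTR; REG_S ra,(sp)  : push the return address
   *    onto the current (task) stack.
   * 2. addi sp,sp,-RISCV_SZPTR; REG_S s0,(sp)  : push the frame pointer.
   * 3. addi s0,sp,2*RISCV_SZPTR                : keep the original sp in
   *    s0, which is callee-saved and therefore survives the call.
   * 4. move sp,%[sp]                           : switch sp to the top of
   *    this CPU's IRQ stack (the [sp] input operand).
   * 5. move a0,%[regs]; call handle_riscv_irq  : run the handler on the
   *    IRQ stack with regs as its first argument.
   * 6. addi sp,s0,-2*RISCV_SZPTR; reload s0, then ra, stepping sp back up
   *    : sp ends exactly where it was before step 1, on the task stack.
   *
   * All caller-saved registers (a0-a7, t0-t6) are listed as clobbers
   * because the called function may modify them; ra and s0 are saved and
   * restored by hand as above.
   */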