LoongArch: Add basic STACKPROTECTOR support
author Huacai Chen <chenhuacai@loongson.cn>
Sat, 10 Dec 2022 14:40:15 +0000 (22:40 +0800)
committer Huacai Chen <chenhuacai@loongson.cn>
Wed, 14 Dec 2022 00:41:53 +0000 (08:41 +0800)
Add basic stack protector support similar to other architectures. A
constant canary value is set at boot time, and with the help of the
compiler's -fstack-protector option we can detect stack corruption.
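
For illustration only (not part of this patch), the checks that
-fstack-protector makes the compiler emit are roughly equivalent to the
hand-written userspace sketch below; every name in it other than the
guard/canary concept is made up:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  static unsigned long stack_guard = 0x595e9fbd94fda766UL;	/* stands in for __stack_chk_guard */

  static void stack_smash_detected(void)
  {
  	fprintf(stderr, "stack smashing detected\n");
  	abort();				/* like __stack_chk_fail(), never returns */
  }

  static void copy_input(const char *src)
  {
  	unsigned long canary = stack_guard;	/* prologue: copy the guard into the frame */
  	char buf[16];

  	strcpy(buf, src);			/* an overlong src would clobber the canary before the return address */

  	if (canary != stack_guard)		/* epilogue: re-check before returning */
  		stack_smash_detected();
  }

  int main(void)
  {
  	copy_input("hello");
  	return 0;
  }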

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/Kconfig
arch/loongarch/include/asm/stackprotector.h [new file with mode: 0644]
arch/loongarch/kernel/asm-offsets.c
arch/loongarch/kernel/process.c
arch/loongarch/kernel/switch.S

index 576a649..28d827c 100644
@@ -103,6 +103,7 @@ config LOONGARCH
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RSEQ
        select HAVE_SETUP_PER_CPU_AREA if NUMA
+       select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_TIF_NOHZ
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
diff --git a/arch/loongarch/include/asm/stackprotector.h b/arch/loongarch/include/asm/stackprotector.h
new file mode 100644
index 0000000..a1a9657
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and on LoongArch gcc expects it to be defined by a global variable
+ * called "__stack_chk_guard".
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+       unsigned long canary;
+
+       /* Try to get a semi random initial value. */
+       get_random_bytes(&canary, sizeof(canary));
+       canary ^= LINUX_VERSION_CODE;
+
+       current->stack_canary = canary;
+       __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
index 4ef4945..4bdb203 100644
@@ -68,6 +68,9 @@ void output_task_defines(void)
        OFFSET(TASK_FLAGS, task_struct, flags);
        OFFSET(TASK_MM, task_struct, mm);
        OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_STACKPROTECTOR)
+       OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
        DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
        BLANK();
 }
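
For context (not part of this patch): asm-offsets.c is compiled and
parsed at build time to produce include/generated/asm-offsets.h, which
is how the assembly in switch.S below can reference the canary's byte
offset. The new OFFSET() entry ends up as a generated line roughly of
this shape, where the numeric value is only illustrative:

  #define TASK_STACK_CANARY 1232 /* offsetof(struct task_struct, stack_canary) */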
index ddb8ba4..790cc14 100644
 #include <asm/unwind.h>
 #include <asm/vdso.h>
 
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 /*
  * Idle related variables and functions
  */
index 202a163..31dd819 100644
@@ -23,6 +23,11 @@ SYM_FUNC_START(__switch_to)
        stptr.d ra, a0, THREAD_REG01
        stptr.d a3, a0, THREAD_SCHED_RA
        stptr.d a4, a0, THREAD_SCHED_CFA
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+       la      t7, __stack_chk_guard
+       LONG_L  t8, a1, TASK_STACK_CANARY
+       LONG_S  t8, t7, 0
+#endif
        move    tp, a2
        cpu_restore_nonscratch a1
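
In C terms, the new !CONFIG_SMP hunk in __switch_to() boils down to the
sketch below (illustrative only; switch_stack_canary() is a hypothetical
helper name, not code from this patch, and it assumes CONFIG_STACKPROTECTOR
is enabled). On SMP the hunk is compiled out, because the single global
guard cannot follow a different task on every CPU at once, so all tasks
keep sharing the boot-time value there.

  #include <linux/sched.h>

  extern unsigned long __stack_chk_guard;

  /* Roughly what the assembly above does for the incoming task 'next'. */
  static inline void switch_stack_canary(struct task_struct *next)
  {
  	if (!IS_ENABLED(CONFIG_SMP))
  		__stack_chk_guard = next->stack_canary;
  }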