os_check_bugs();
}
+void apply_ibt_endbr(s32 *start, s32 *end)
+{
+}
+
void apply_retpolines(s32 *start, s32 *end)
{
}
config X86_KERNEL_IBT
prompt "Indirect Branch Tracking"
bool
- depends on X86_64 && CC_HAS_IBT
+ depends on X86_64 && CC_HAS_IBT && STACK_VALIDATION
help
Build the kernel with support for Indirect Branch Tracking, a
hardware-supported coarse-grain forward-edge Control Flow Integrity
protection. It enforces that all indirect calls must land on
an ENDBR instruction; as such, the compiler will instrument the
code with them to make this happen.
+ In addition to building the kernel with IBT, seal all functions that
+ are not indirect call targets, preventing them from ever becoming one.
+
+ This requires LTO-like objtool runs and will slow down the build. It
+ does, however, significantly reduce the number of ENDBR instructions
+ in the kernel image.
+
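As an illustration of what the help text describes (not part of the patch; the comments about compiler output are assumptions about what -fcf-protection=branch emits, and target/caller are hypothetical names): with IBT active, an indirect call that does not land on ENDBR64 raises a Control Protection fault (#CP), which is exactly why sealed sites can safely be poisoned.

	/*
	 * Illustration only: the compiler prefixes every potential
	 * indirect-call target with ENDBR64.
	 */
	void target(void)
	{
		/* compiler-emitted first instruction: endbr64 */
	}

	void caller(void (*fp)(void))
	{
		fp();	/* must land on ENDBR64, otherwise the CPU raises #CP */
	}
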
config X86_INTEL_MEMORY_PROTECTION_KEYS
prompt "Memory Protection Keys"
def_bool y
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_ibt_endbr(s32 *start, s32 *end);
struct module;
return endbr;
}
+static inline __attribute_const__ u32 gen_endbr_poison(void)
+{
+ /*
+ * 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it
+ * will be unique to (former) ENDBR sites.
+ */
+ return 0x001f0f66; /* osp nopl (%rax) */
+}
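For reference, a byte-level view of the encodings involved, taken from the standard x86-64 opcode tables; the u32 values assume the little-endian 4-byte load that is_endbr() below performs:

	/*
	 * ENDBR64: f3 0f 1e fa  ->  u32 0xfa1e0ff3
	 * ENDBR32: f3 0f 1e fb  ->  u32 0xfb1e0ff3
	 * poison : 66 0f 1f 00  ->  u32 0x001f0f66
	 *                           (3-byte NOPL "0f 1f 00" + 0x66 prefix)
	 */
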
+
static inline bool is_endbr(u32 val)
{
+ if (val == gen_endbr_poison())
+ return true;
+
val &= ~0x01000000U; /* ENDBR32 -> ENDBR64 */
return val == gen_endbr();
}
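A note on the mask: clearing bit 24 maps the ENDBR32 opcode byte (fb) onto the ENDBR64 one (fa), so a single compare accepts both encodings. A minimal usage sketch (starts_with_endbr is a hypothetical helper, not kernel API):

	/* Does the code at addr still start with an ENDBR instruction? */
	static bool starts_with_endbr(void *addr)
	{
		return is_endbr(*(u32 *)addr);
	}
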
}
extern s32 __retpoline_sites[], __retpoline_sites_end[];
+extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);
#endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */
+#ifdef CONFIG_X86_KERNEL_IBT
+
+/*
+ * Generated by: objtool --ibt
+ */
+void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end)
+{
+ s32 *s;
+
+ for (s = start; s < end; s++) {
+ u32 endbr, poison = gen_endbr_poison();
+ void *addr = (void *)s + *s;
+
+ if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr)))
+ continue;
+
+ if (WARN_ON_ONCE(!is_endbr(endbr)))
+ continue;
+
+ DPRINTK("ENDBR at: %pS (%px)", addr, addr);
+
+ /*
+ * When we have IBT, the lack of ENDBR will trigger #CP on any
+ * indirect call to this (now sealed) address.
+ */
+ DUMP_BYTES(((u8*)addr), 4, "%px: orig: ", addr);
+ DUMP_BYTES(((u8*)&poison), 4, "%px: repl: ", addr);
+ text_poke_early(addr, &poison, 4);
+ }
+}
+
+#else
+
+void __init_or_module noinline apply_ibt_endbr(s32 *start, s32 *end) { }
+
+#endif /* CONFIG_X86_KERNEL_IBT */
+
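The .ibt_endbr_seal entries use the same self-relative 32-bit offset scheme as .retpoline_sites; a minimal sketch of the address computation the loop above performs (seal_site_addr is a hypothetical helper name):

	/* Each s32 entry encodes the ENDBR address relative to the entry itself. */
	static inline void *seal_site_addr(const s32 *entry)
	{
		return (void *)entry + *entry;
	}
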
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
u8 *text, u8 *text_end)
*/
apply_alternatives(__alt_instructions, __alt_instructions_end);
+ apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
+
#ifdef CONFIG_SMP
/* Patch to UP if other cpus not imminent. */
if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL, *orc = NULL, *orc_ip = NULL,
- *retpolines = NULL;
+ *retpolines = NULL, *ibt_endbr = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
orc_ip = s;
if (!strcmp(".retpoline_sites", secstrings + s->sh_name))
retpolines = s;
+ if (!strcmp(".ibt_endbr_seal", secstrings + s->sh_name))
+ ibt_endbr = s;
}
/*
void *aseg = (void *)alt->sh_addr;
apply_alternatives(aseg, aseg + alt->sh_size);
}
+ if (ibt_endbr) {
+ void *iseg = (void *)ibt_endbr->sh_addr;
+ apply_ibt_endbr(iseg, iseg + ibt_endbr->sh_size);
+ }
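For scale, each sealed site costs exactly one s32 in the section, so the number of sites a module carries follows directly from the section size (a sketch, assuming the section holds nothing but these entries):

	/* Number of sealed ENDBR sites carried by the module section. */
	size_t nr_sites = ibt_endbr->sh_size / sizeof(s32);
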
if (locks && text) {
void *lseg = (void *)locks->sh_addr;
void *tseg = (void *)text->sh_addr;
targets-for-builtin += $(obj)/built-in.a
endif
-targets-for-modules := $(patsubst %.o, %.mod, $(filter %.o, $(obj-m)))
+targets-for-modules :=
ifdef CONFIG_LTO_CLANG
targets-for-modules += $(patsubst %.o, %.lto.o, $(filter %.o, $(obj-m)))
endif
+ifdef CONFIG_X86_KERNEL_IBT
+targets-for-modules += $(patsubst %.o, %.objtool, $(filter %.o, $(obj-m)))
+endif
+
+targets-for-modules += $(patsubst %.o, %.mod, $(filter %.o, $(obj-m)))
+
ifdef need-modorder
targets-for-modules += $(obj)/modules.order
endif
objtool_args = \
$(if $(CONFIG_UNWINDER_ORC),orc generate,check) \
$(if $(part-of-module), --module) \
+ $(if $(CONFIG_X86_KERNEL_IBT), --lto --ibt) \
$(if $(CONFIG_FRAME_POINTER),, --no-fp) \
$(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\
$(if $(CONFIG_RETPOLINE), --retpoline) \
$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
$(if $(CONFIG_SLS), --sls)
-cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $@)
-cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
+cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool_args) $(@:.objtool=.o))
+cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$(@:.objtool=.o): $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
endif # CONFIG_STACK_VALIDATION
# Skip objtool for LLVM bitcode
$(obj)/%.o: objtool-enabled :=
+# objtool was skipped for LLVM bitcode, run it now that we have compiled
+# modules into native code
+$(obj)/%.lto.o: objtool-enabled = y
+$(obj)/%.lto.o: part-of-module := y
+
+else ifdef CONFIG_X86_KERNEL_IBT
+
+# Skip objtool on individual files
+$(obj)/%.o: objtool-enabled :=
+
+# Instead, run objtool on the module as a whole, right before
+# the final link pass with the linker script.
+$(obj)/%.objtool: objtool-enabled = y
+$(obj)/%.objtool: part-of-module := y
+
else
# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
# Module .o files may contain LLVM bitcode, compile them into native code
# before ELF processing
quiet_cmd_cc_lto_link_modules = LTO [M] $@
-cmd_cc_lto_link_modules = \
+ cmd_cc_lto_link_modules = \
$(LD) $(ld_flags) -r -o $@ \
$(shell [ -s $(@:.lto.o=.o.symversions) ] && \
echo -T $(@:.lto.o=.o.symversions)) \
--whole-archive $(filter-out FORCE,$^) \
$(cmd_objtool)
-# objtool was skipped for LLVM bitcode, run it now that we have compiled
-# modules into native code
-$(obj)/%.lto.o: objtool-enabled = y
-$(obj)/%.lto.o: part-of-module := y
-
$(obj)/%.lto.o: $(obj)/%.o FORCE
$(call if_changed,cc_lto_link_modules)
endif
$(obj)/%.mod: $(obj)/%$(mod-prelink-ext).o FORCE
$(call if_changed,mod)
+#
+# Since objtool re-writes the file in place, it changes the timestamp;
+# therefore it is critical that the %.objtool file gets a timestamp
+# *after* objtool runs.
+#
+# Additionally, care must be taken with ordering this rule against the
+# other rules that take %.o as a dependency.
+#
+cmd_objtool_mod = true $(cmd_objtool) ; touch $@
+
+$(obj)/%.objtool: $(obj)/%$(mod-prelink-ext).o FORCE
+ $(call if_changed,objtool_mod)
+
quiet_cmd_cc_lst_c = MKLST $@
cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \
$(CONFIG_SHELL) $(srctree)/scripts/makelst $*.o \
local objtoolcmd;
local objtoolopt;
- if is_enabled CONFIG_LTO_CLANG && is_enabled CONFIG_STACK_VALIDATION; then
+ if is_enabled CONFIG_STACK_VALIDATION && \
+ ( is_enabled CONFIG_LTO_CLANG || is_enabled CONFIG_X86_KERNEL_IBT ); then
+
# Don't perform vmlinux validation unless explicitly requested,
# but run objtool on vmlinux.o now that we have an object file.
if is_enabled CONFIG_UNWINDER_ORC; then
objtoolopt="${objtoolopt} --lto"
+ if is_enabled CONFIG_X86_KERNEL_IBT; then
+ objtoolopt="${objtoolopt} --ibt"
+ fi
+
if is_enabled CONFIG_FTRACE_MCOUNT_USE_OBJTOOL; then
objtoolopt="${objtoolopt} --mcount"
fi
# skip output file argument
shift
- if is_enabled CONFIG_LTO_CLANG; then
+ if is_enabled CONFIG_LTO_CLANG || is_enabled CONFIG_X86_KERNEL_IBT; then
# Use vmlinux.o instead of performing the slow LTO link again.
objs=vmlinux.o
libs=