// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
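/*
 * Usage sketch (illustrative, not from the original file): the iterators
 * above read like ordinary for loops, e.g.:
 *
 *	struct instruction *insn;
 *
 *	func_for_each_insn(file, func, insn) {
 *		// visits every decoded instruction of func, following
 *		// next_insn_same_func() into its .cold subfunction
 *	}
 */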
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}
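/*
 * Example (sketch): a tail call compiled as
 *
 *	foo:
 *		...
 *		jmp	bar	# bar returns on foo's behalf
 *
 * is a static jump with a call_dest, so it's treated as a sibling call,
 * while 'jmp *%rax' (INSN_JUMP_DYNAMIC) only counts as one when it isn't
 * a switch-table dispatch.
 */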
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__module_put_and_exit",
		"machine_real_restart",
		"rewind_stack_do_exit",
		"kunit_try_catch_throw",
		"cpu_bringup_and_idle",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;
	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}
static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}
static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}
static int cfi_bits;
static struct hlist_head *cfi_hash;
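/*
 * CFI states are interned in this hash so that instructions with identical
 * unwind state can share a single struct cfi_state.  cficmp() and cfi_key()
 * below deliberately skip the leading 'hash' member (the hlist_node linkage)
 * so that only the unwind data itself is compared and hashed.
 */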
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}
static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text"))
			sec->noinstr = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret) {
				free(insn);
				return ret;
			}

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn)
				insn->func = func;
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}
/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(sizeof(struct pv_state), nr);
	if (!file->pv_ops)
		return -1;

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
		add_pv_ops(file, pv_ops);

	return 0;
}
static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}
/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * reachable.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}
static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
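/*
 * For reference, the entry layout written above (see
 * include/linux/static_call_types.h) is two 32-bit PC-relative offsets,
 * which is why the 'key' reloc lands at byte offset 4:
 *
 *	struct static_call_site {
 *		s32 addr;	// the call site
 *		s32 key;	// static_call_key; low bits carry INIT/TAIL flags
 *	};
 */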
static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}
static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}
/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_write_range",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}
/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}
#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		if (!file)
			return NULL;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}
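/*
 * insn_reloc() caches its lookup in insn->reloc: NULL means "not looked up
 * yet" and NEGATIVE_RELOC means "looked up, no relocation exists", so the
 * potentially expensive find_reloc_by_dest_range() runs at most once per
 * instruction.
 */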
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction.  For now, don't
	 * annotate it.  (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV with a function attribute
	 * so they need a little help, NOP out any KCOV calls from noinstr
	 * text.
	 */
	if (insn->sec->noinstr && sym->kcov) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;
		return;
	}

	if (mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}
}
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}
static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}
/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}
		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {

				/* internal sibling call (without reloc) */
				add_call_dest(file, insn, insn->jump_dest->func, true);
			}
		}
	}

	return 0;
}
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}
/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}
/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;
	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}
	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}
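/*
 * Note that the replacement group deliberately shares the original group's
 * CFI array (new_alt_group->cfi = orig_alt_group->cfi): propagate_alt_cfi()
 * later uses that single array to check that every code stream agrees on
 * the stack layout at each instruction boundary.
 */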
/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	if (special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links; these help avoid
	 * much of the 'in between' code, which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}
static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}
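/*
 * Illustrative sketch (not from this file): for source like
 *
 *	switch (x) { case 0: ... case 1: ... case 2: ... }
 *
 * GCC may emit an indirect jump through a .rodata table of code labels:
 *
 *	jmpq	*.L4(,%rax,8)		# .L4 lives in .rodata
 *
 * Each table entry carries a relocation to an address inside the function,
 * which is exactly what add_jump_table() walks above.
 */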
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}
static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (!strcmp(func->name, "__fentry__"))
				func->fentry = true;

			if (!strncmp(func->name, "__sanitizer_cov_", 16))
				func->kcov = true;
		}
	}

	return 0;
}
static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_dead_ends(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_special_section_alts() as that depends on
	 * jump_dest being set.
	 */
	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}
static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn->call_dest &&
	    insn->call_dest->fentry)
		return true;

	return false;
}
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *				...
 *   41 5d			pop    %r13
 *   c3				retq
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);
2291 switch (op->dest.type) {
2294 switch (op->src.type) {
2297 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2298 cfa->base == CFI_SP &&
2299 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) {
2301 /* mov %rsp, %rbp */
2302 cfa->base = op->dest.reg;
2303 cfi->bp_scratch = false;
2306 else if (op->src.reg == CFI_SP &&
2307 op->dest.reg == CFI_BP && cfi->drap) {
2309 /* drap: mov %rsp, %rbp */
2310 regs[CFI_BP].base = CFI_BP;
2311 regs[CFI_BP].offset = -cfi->stack_size;
2312 cfi->bp_scratch = false;
2315 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2320 * This is needed for the rare case where GCC
2327 cfi->vals[op->dest.reg].base = CFI_CFA;
2328 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2331 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2332 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2337 * Restore the original stack pointer (Clang).
2339 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2342 else if (op->dest.reg == cfa->base) {
2344 /* mov %reg, %rsp */
2345 if (cfa->base == CFI_SP &&
2346 cfi->vals[op->src.reg].base == CFI_CFA) {
2349 * This is needed for the rare case
2350 * where GCC does something dumb like:
2352 * lea 0x8(%rsp), %rcx
2356 cfa->offset = -cfi->vals[op->src.reg].offset;
2357 cfi->stack_size = cfa->offset;
2359 } else if (cfa->base == CFI_SP &&
2360 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2361 cfi->vals[op->src.reg].offset == cfa->offset) {
2366 * 1: mov %rsp, (%[tos])
2367 * 2: mov %[tos], %rsp
2373 * 1 - places a pointer to the previous
2374 * stack at the Top-of-Stack of the
2377 * 2 - switches to the new stack.
2379 * 3 - pops the Top-of-Stack to restore
2380 * the original stack.
2382 * Note: we set base to SP_INDIRECT
2383 * here and preserve offset. Therefore
2384 * when the unwinder reaches ToS it
2385 * will dereference SP and then add the
2386 * offset to find the next frame, IOW:
2389 cfa->base = CFI_SP_INDIRECT;
2392 cfa->base = CFI_UNDEFINED;
2397 else if (op->dest.reg == CFI_SP &&
2398 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2399 cfi->vals[op->src.reg].offset == cfa->offset) {
2402 * The same stack swizzle case 2) as above. But
2403 * because we can't change cfa->base, case 3)
2404 * will become a regular POP. Pretend we're a
2405 * PUSH so things don't go unbalanced.
2407 cfi->stack_size += 8;
2414 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2417 cfi->stack_size -= op->src.offset;
2418 if (cfa->base == CFI_SP)
2419 cfa->offset -= op->src.offset;
2423 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2425 /* lea disp(%rbp), %rsp */
2426 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2430 if (!cfi->drap && op->src.reg == CFI_SP &&
2431 op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2432 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) {
2434 /* lea disp(%rsp), %rbp */
2436 cfa->offset -= op->src.offset;
2437 cfi->bp_scratch = false;
2441 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2443 /* drap: lea disp(%rsp), %drap */
2444 cfi->drap_reg = op->dest.reg;
2447 * lea disp(%rsp), %reg
2449 * This is needed for the rare case where GCC
2450 * does something dumb like:
2452 * lea 0x8(%rsp), %rcx
2456 cfi->vals[op->dest.reg].base = CFI_CFA;
2457 cfi->vals[op->dest.reg].offset = \
2458 -cfi->stack_size + op->src.offset;
2463 if (cfi->drap && op->dest.reg == CFI_SP &&
2464 op->src.reg == cfi->drap_reg) {
2466 /* drap: lea disp(%drap), %rsp */
2468 cfa->offset = cfi->stack_size = -op->src.offset;
2469 cfi->drap_reg = CFI_UNDEFINED;
2474 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2475 WARN_FUNC("unsupported stack register modification",
2476 insn->sec, insn->offset);
2483 if (op->dest.reg != CFI_SP ||
2484 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2485 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2486 WARN_FUNC("unsupported stack pointer realignment",
2487 insn->sec, insn->offset);
2491 if (cfi->drap_reg != CFI_UNDEFINED) {
2492 /* drap: and imm, %rsp */
2493 cfa->base = cfi->drap_reg;
2494 cfa->offset = cfi->stack_size = 0;
2499 * Older versions of GCC (4.8ish) realign the stack
2500 * without DRAP, with a frame pointer.
2507 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2509 /* pop %rsp; # restore from a stack swizzle */
2514 if (!cfi->drap && op->dest.reg == cfa->base) {
2520 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2521 op->dest.reg == cfi->drap_reg &&
2522 cfi->drap_offset == -cfi->stack_size) {
2524 /* drap: pop %drap */
2525 cfa->base = cfi->drap_reg;
2527 cfi->drap_offset = -1;
2529 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2532 restore_reg(cfi, op->dest.reg);
2535 cfi->stack_size -= 8;
2536 if (cfa->base == CFI_SP)
		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;
		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;
2591 cfi->stack_size += 8;
2592 if (cfa->base == CFI_SP)
2595 if (op->src.type != OP_SRC_REG)
2599 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2601 /* drap: push %drap */
2602 cfa->base = CFI_BP_INDIRECT;
2603 cfa->offset = -cfi->stack_size;
2605 /* save drap so we know when to restore it */
2606 cfi->drap_offset = -cfi->stack_size;
2608 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2610 /* drap: push %rbp */
2611 cfi->stack_size = 0;
2615 /* drap: push %reg */
2616 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2622 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2625 /* detect when asm code uses rbp as a scratch register */
2626 if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2627 cfa->base != CFI_BP)
2628 cfi->bp_scratch = true;
	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;
2671 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2672 WARN_FUNC("unknown stack-related memory operation",
2673 insn->sec, insn->offset);
2678 cfi->stack_size -= 8;
2679 if (cfa->base == CFI_SP)
	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications.  That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
2702 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2704 struct cfi_state **alt_cfi;
2707 if (!insn->alt_group)
2711 WARN("CFI missing");
2715 alt_cfi = insn->alt_group->cfi;
2716 group_off = insn->offset - insn->alt_group->first_insn->offset;
2718 if (!alt_cfi[group_off]) {
2719 alt_cfi[group_off] = insn->cfi;
2721 if (cficmp(alt_cfi[group_off], insn->cfi)) {
2722 WARN_FUNC("stack layout conflict in alternatives",
2723 insn->sec, insn->offset);
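/*
 * Apply an instruction's stack operations to the CFI state. For code inside
 * alternatives, also track the SMAP AC flag across PUSHF/POPF pairs by
 * shifting the current uaccess state into a bit stack on PUSHF and popping
 * it back out on POPF.
 */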
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

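/*
 * Compare the CFI state reached on the current path against the state
 * recorded when this instruction was first visited, warning about the first
 * mismatching component: CFA, saved registers, unwind type, or DRAP state.
 */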
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

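/*
 * Best-effort destination name for warning messages: the call target's
 * symbol name if known, a "pv_ops[idx]" string for paravirt calls, or
 * "{dynamic}" otherwise.
 */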
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[16];
	struct reloc *rel;
	int idx;

	if (insn->call_dest)
		return insn->call_dest->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

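/*
 * A paravirt call is noinstr-safe only if every possible target of its
 * pv_ops[] slot lives in a noinstr section. The verdict is cached in the
 * slot's ->clean flag so each slot is checked at most once.
 */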
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	if (file->pv_ops[idx].clean)
		return true;

	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section, we're good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

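/*
 * Calls must not leave the noinstr region, must not be made with UACCESS
 * enabled unless the destination is whitelisted as uaccess-safe, and must
 * not be made with the direction flag (DF) set.
 */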
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}

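/*
 * Validate return-site state: instrumentation balanced in noinstr code,
 * UACCESS matching the function's uaccess-safe annotation, DF cleared, the
 * stack frame restored, and RBP never used as a scratch register.
 */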
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place. When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		visited = 1 << state.uaccess;
		if (insn->visited) {
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_cache++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;

			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

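/*
 * Validate all instructions annotated with an unwind hint, starting a branch
 * walk at each unvisited hint. When @sec is NULL the entire file is scanned.
 */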
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(&state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}

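/*
 * In a retpoline build, flag any remaining indirect jump or call that hasn't
 * been annotated as retpoline-safe.
 */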
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines. The exception is modules, which
		 * are loaded late: their .init.text very much does need
		 * retpolines.
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

		WARN_FUNC("indirect %s found in RETPOLINE build",
			  insn->sec, insn->offset,
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");

		warnings++;
	}

	return warnings;
}

static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}

static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions. This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	if (insn->func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

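/*
 * Validate a single function symbol: locate its first instruction and follow
 * every code path reachable from it.
 */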
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

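/*
 * Validate every STT_FUNC symbol in the section, each starting from a fresh
 * per-function CFI state.
 */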
static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(&state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

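/*
 * For vmlinux.o, (re)validate only the noinstr and entry sections; the rest
 * of the kernel has already been validated per object file.
 */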
static int validate_vmlinux_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}

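/*
 * Once all code paths have been walked, any instruction that was never
 * visited and can't be explained away by ignore_unreachable_insn() is
 * reported as unreachable.
 */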
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

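/*
 * Main entry point: decode the object file, run the requested validation
 * passes, and generate the derived sections (static call sites, retpoline
 * sites, mcount locations).
 */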
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	if (list_empty(&file->insn_list))
		goto out;

	if (vmlinux && !validate_dup) {
		ret = validate_vmlinux_functions(file);
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

	if (retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	ret = validate_functions(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	ret = validate_unwind_hints(file, NULL);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (!warnings) {
		ret = validate_reachable_instructions(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	ret = create_static_call_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 * For now, don't fail the kernel build on fatal warnings. These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}