// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}
static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

#define for_each_insn(file, insn)					\
	list_for_each_entry(insn, &file->insn_list, list)
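/*
 * Usage sketch (editor's illustration, not part of the original file):
 *
 *	struct instruction *insn;
 *
 *	func_for_each_insn(file, func, insn) {
 *		if (insn->type == INSN_RETURN)
 *			...
 *	}
 *
 * func_for_each_insn() follows a function into its .cold subfunction via
 * next_insn_same_func(), while sym_for_each_insn() stays strictly within
 * the symbol's [offset, offset + len) byte range.
 */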
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__module_put_and_exit",
		"machine_real_restart",
		"rewind_stack_and_make_dead",
		"kunit_try_catch_throw",
		"cpu_bringup_and_idle",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return false;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}
static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}
static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
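/*
 * Note: cficmp() and cfi_key() both skip sizeof(cfi->hash) bytes from the
 * start of the struct, which assumes 'hash' is the *first* member of
 * struct cfi_state; only the payload after the embedded hlist_node is
 * compared and hashed.
 */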
static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file->elf, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn)
				insn->func = func;
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}
static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}
/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}
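/*
 * For reference, a sketch of how these annotations are emitted on the kernel
 * side (paraphrased from the kernel's annotate_unreachable() helpers, not
 * part of this file):
 *
 *	asm volatile("998:\n\t"
 *		     ".pushsection .discard.unreachable\n\t"
 *		     ".long 998b - .\n\t"
 *		     ".popsection\n\t");
 *
 * which is why each relocation above resolves to an instruction offset (or
 * to the very end of a section, handled via find_last_insn()).
 */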
static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules(?), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
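/*
 * Example of the key-name derivation above (illustrative): a call to the
 * trampoline "__SCT__might_resched" is strdup'ed, its "__SCT__" prefix is
 * overwritten in place with "__SCK__" (both prefixes have equal length),
 * and the resulting "__SCK__might_resched" is looked up as the
 * static_call_key symbol.
 */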
static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}
static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}
static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}
/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}
/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}
#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			/* cache the negative result to avoid re-searching */
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction.  For now, don't
	 * annotate it.  (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV with a function attribute
	 * so they need a little help, NOP out any KCOV calls from noinstr
	 * text.
	 */
	if (insn->sec->noinstr && sym->kcov) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here.  Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}
}
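/*
 * Example (illustrative): with -mfentry, each traceable function begins with
 * "call __fentry__"; the mcount branch above rewrites that call into a NOP
 * at build time and records its location on file->mcount_loc_list, from
 * which the __mcount_loc section is later generated for ftrace.
 */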
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}
static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}
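/*
 * Example (illustrative): "call __x86_indirect_thunk_r11" is re-typed as
 * INSN_CALL_DYNAMIC here, i.e. treated like the "call *%r11" it stands in
 * for, just as "jmp __x86_indirect_thunk_r11" becomes INSN_JUMP_DYNAMIC.
 */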
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	/* Skip the non-text sections, especially .discard ones */
	if (add && insn->sec->text)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}
/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			/*
			 * This is a special case for zen_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction.  Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->return_thunk) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {
				/* internal sibling call (without reloc) */
				add_call_dest(file, insn, insn->jump_dest->func, true);
			}
		}
	}

	return 0;
}
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}
/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}
/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}
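/*
 * Layout sketch (illustrative): for an alternative with orig_len = 5 and
 * new_len = 3, a 2-byte fake NOP is appended to the replacement so that both
 * alt_groups span 5 bytes and index the same shared ->cfi array by
 * offset-within-group, which is what propagate_alt_cfi() relies on.
 */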
/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	if (special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
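/*
 * Note: the "prev_offset + 8" stride above encodes an assumption that jump
 * table entries are 8-byte pointers (one relocation per entry), as emitted
 * for x86-64 switch tables in .rodata.
 */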
/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code, which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}
static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				insn->entry = 1;
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
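/*
 * For reference (sketch): these hints originate from the UNWIND_HINT_* asm
 * annotations in the kernel's entry and assembly code.  Roughly, each
 * annotation records the hint's stack-pointer register/offset and type in a
 * struct unwind_hint placed in .discard.unwind_hints, plus a relocation back
 * to the annotated instruction, which is exactly what the loop above walks.
 */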
static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}
static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (arch_is_rethunk(func))
				func->return_thunk = true;

			if (!strcmp(func->name, "__fentry__"))
				func->fentry = true;

			if (!strncmp(func->name, "__sanitizer_cov_", 16))
				func->kcov = true;
		}
	}

	return 0;
}
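/*
 * Examples of the classifications above (x86-64, illustrative): "__SCT__*"
 * symbols are static-call trampolines, arch_is_retpoline() matches the
 * "__x86_indirect_thunk_*" family, arch_is_rethunk() matches
 * "__x86_return_thunk", and the "__sanitizer_cov_" prefix covers the KCOV
 * callbacks that annotate_call_site() NOPs out of noinstr code.
 */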
static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_dead_ends(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_special_section_alts() as that depends on
	 * jump_dest being set.
	 */
	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}
static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn->call_dest &&
	    insn->call_dest->fentry)
		return true;

	return false;
}
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
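/*
 * Note: this reduced tracking only runs inside UNWIND_HINT_TYPE_REGS and
 * UNWIND_HINT_TYPE_REGS_PARTIAL regions (see update_cfi_state() below);
 * there the CFA offset just follows pushes, pops and "add imm, %rsp", and
 * individual callee-saved register locations are not tracked.
 */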
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
2320 static int update_cfi_state(struct instruction *insn,
2321 struct instruction *next_insn,
2322 struct cfi_state *cfi, struct stack_op *op)
2324 struct cfi_reg *cfa = &cfi->cfa;
2325 struct cfi_reg *regs = cfi->regs;
2327 /* stack operations don't make sense with an undefined CFA */
2328 if (cfa->base == CFI_UNDEFINED) {
2330 WARN_FUNC("undefined stack state", insn->sec, insn->offset);
2336 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2337 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2338 return update_cfi_state_regs(insn, cfi, op);
2340 switch (op->dest.type) {
2343 switch (op->src.type) {
2346 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2347 cfa->base == CFI_SP &&
2348 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) {
2350 /* mov %rsp, %rbp */
2351 cfa->base = op->dest.reg;
2352 cfi->bp_scratch = false;
2355 else if (op->src.reg == CFI_SP &&
2356 op->dest.reg == CFI_BP && cfi->drap) {
2358 /* drap: mov %rsp, %rbp */
2359 regs[CFI_BP].base = CFI_BP;
2360 regs[CFI_BP].offset = -cfi->stack_size;
2361 cfi->bp_scratch = false;
2364 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2369 * This is needed for the rare case where GCC
2376 cfi->vals[op->dest.reg].base = CFI_CFA;
2377 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2380 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2381 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2386 * Restore the original stack pointer (Clang).
2388 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2391 else if (op->dest.reg == cfa->base) {
2393 /* mov %reg, %rsp */
2394 if (cfa->base == CFI_SP &&
2395 cfi->vals[op->src.reg].base == CFI_CFA) {
2398 * This is needed for the rare case
2399 * where GCC does something dumb like:
2401 * lea 0x8(%rsp), %rcx
2405 cfa->offset = -cfi->vals[op->src.reg].offset;
2406 cfi->stack_size = cfa->offset;
2408 } else if (cfa->base == CFI_SP &&
2409 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2410 cfi->vals[op->src.reg].offset == cfa->offset) {
2415 * 1: mov %rsp, (%[tos])
2416 * 2: mov %[tos], %rsp
2422 * 1 - places a pointer to the previous
2423 * stack at the Top-of-Stack of the
2426 * 2 - switches to the new stack.
2428 * 3 - pops the Top-of-Stack to restore
2429 * the original stack.
2431 * Note: we set base to SP_INDIRECT
2432 * here and preserve offset. Therefore
2433 * when the unwinder reaches ToS it
2434 * will dereference SP and then add the
2435 * offset to find the next frame, IOW:
2438 cfa->base = CFI_SP_INDIRECT;
2441 cfa->base = CFI_UNDEFINED;
2446 else if (op->dest.reg == CFI_SP &&
2447 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2448 cfi->vals[op->src.reg].offset == cfa->offset) {
2451 * The same stack swizzle case 2) as above. But
2452 * because we can't change cfa->base, case 3)
2453 * will become a regular POP. Pretend we're a
2454 * PUSH so things don't go unbalanced.
2456 cfi->stack_size += 8;
2463 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2466 cfi->stack_size -= op->src.offset;
2467 if (cfa->base == CFI_SP)
2468 cfa->offset -= op->src.offset;
2472 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2474 /* lea disp(%rbp), %rsp */
2475 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2479 if (!cfi->drap && op->src.reg == CFI_SP &&
2480 op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
2481 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) {
2483 /* lea disp(%rsp), %rbp */
2485 cfa->offset -= op->src.offset;
2486 cfi->bp_scratch = false;
2490 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2492 /* drap: lea disp(%rsp), %drap */
2493 cfi->drap_reg = op->dest.reg;
2496 * lea disp(%rsp), %reg
2498 * This is needed for the rare case where GCC
2499 * does something dumb like:
2501 * lea 0x8(%rsp), %rcx
2505 cfi->vals[op->dest.reg].base = CFI_CFA;
2506 cfi->vals[op->dest.reg].offset = \
2507 -cfi->stack_size + op->src.offset;
2512 if (cfi->drap && op->dest.reg == CFI_SP &&
2513 op->src.reg == cfi->drap_reg) {
2515 /* drap: lea disp(%drap), %rsp */
2517 cfa->offset = cfi->stack_size = -op->src.offset;
2518 cfi->drap_reg = CFI_UNDEFINED;
2523 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2524 WARN_FUNC("unsupported stack register modification",
2525 insn->sec, insn->offset);
2532 if (op->dest.reg != CFI_SP ||
2533 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2534 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2535 WARN_FUNC("unsupported stack pointer realignment",
2536 insn->sec, insn->offset);
2540 if (cfi->drap_reg != CFI_UNDEFINED) {
2541 /* drap: and imm, %rsp */
2542 cfa->base = cfi->drap_reg;
2543 cfa->offset = cfi->stack_size = 0;
2548 * Older versions of GCC (4.8ish) realign the stack
2549 * without DRAP, with a frame pointer.
2556 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2558 /* pop %rsp; # restore from a stack swizzle */
2563 if (!cfi->drap && op->dest.reg == cfa->base) {
2569 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2570 op->dest.reg == cfi->drap_reg &&
2571 cfi->drap_offset == -cfi->stack_size) {
2573 /* drap: pop %drap */
2574 cfa->base = cfi->drap_reg;
2576 cfi->drap_offset = -1;
2578 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2581 restore_reg(cfi, op->dest.reg);
2584 cfi->stack_size -= 8;
2585 if (cfa->base == CFI_SP)
2590 case OP_SRC_REG_INDIRECT:
2591 if (!cfi->drap && op->dest.reg == cfa->base &&
2592 op->dest.reg == CFI_BP) {
2594 /* mov disp(%rsp), %rbp */
2596 cfa->offset = cfi->stack_size;
2599 if (cfi->drap && op->src.reg == CFI_BP &&
2600 op->src.offset == cfi->drap_offset) {
2602 /* drap: mov disp(%rbp), %drap */
2603 cfa->base = cfi->drap_reg;
2605 cfi->drap_offset = -1;
2608 if (cfi->drap && op->src.reg == CFI_BP &&
2609 op->src.offset == regs[op->dest.reg].offset) {
2611 /* drap: mov disp(%rbp), %reg */
2612 restore_reg(cfi, op->dest.reg);
2614 } else if (op->src.reg == cfa->base &&
2615 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
2617 /* mov disp(%rbp), %reg */
2618 /* mov disp(%rsp), %reg */
2619 restore_reg(cfi, op->dest.reg);
2621 } else if (op->src.reg == CFI_SP &&
2622 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
2624 /* mov disp(%rsp), %reg */
2625 restore_reg(cfi, op->dest.reg);
2631 WARN_FUNC("unknown stack-related instruction",
2632 insn->sec, insn->offset);
2640 cfi->stack_size += 8;
2641 if (cfa->base == CFI_SP)
2644 if (op->src.type != OP_SRC_REG)
2648 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2650 /* drap: push %drap */
2651 cfa->base = CFI_BP_INDIRECT;
2652 cfa->offset = -cfi->stack_size;
2654 /* save drap so we know when to restore it */
2655 cfi->drap_offset = -cfi->stack_size;
2657 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2659 /* drap: push %rbp */
2660 cfi->stack_size = 0;
2664 /* drap: push %reg */
2665 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2671 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2674 /* detect when asm code uses rbp as a scratch register */
2675 if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2676 cfa->base != CFI_BP)
2677 cfi->bp_scratch = true;
2680 case OP_DEST_REG_INDIRECT:
2683 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2685 /* drap: mov %drap, disp(%rbp) */
2686 cfa->base = CFI_BP_INDIRECT;
2687 cfa->offset = op->dest.offset;
2689 /* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {
				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}
2697 } else if (op->dest.reg == cfa->base) {
2699 /* mov reg, disp(%rbp) */
2700 /* mov reg, disp(%rsp) */
2701 save_reg(cfi, op->src.reg, CFI_CFA,
2702 op->dest.offset - cfi->cfa.offset);
2704 } else if (op->dest.reg == CFI_SP) {
2706 /* mov reg, disp(%rsp) */
2707 save_reg(cfi, op->src.reg, CFI_CFA,
2708 op->dest.offset - cfi->stack_size);
2710 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
2712 /* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}
		break;
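		/*
		 * Illustrative stack-swizzle sequence (assumption: %rax as the
		 * scratch register), combining this case with the "pop %rsp"
		 * restore handled in OP_SRC_POP above:
		 *
		 *	mov    %rsp, (%rax)	# save SP, tracked as CFI_SP_INDIRECT
		 *	mov    %rax, %rsp	# switch to the new stack
		 *	...
		 *	pop    %rsp		# restore the original stack
		 */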
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;
		break;
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
/*
 * The stack layouts of alternative instruction streams can sometimes diverge
 * when they contain stack modifications.  That's fine as long as the possible
 * stack layouts don't conflict at any given instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;
	if (!insn->alt_group)
		return 0;
	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}
2764 alt_cfi = insn->alt_group->cfi;
2765 group_off = insn->offset - insn->alt_group->first_insn->offset;
	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else if (cficmp(alt_cfi[group_off], insn->cfi)) {
		WARN_FUNC("stack layout conflict in alternatives",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
2780 static int handle_insn_ops(struct instruction *insn,
2781 struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;
2786 list_for_each_entry(op, &insn->stack_ops, list) {
		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		if (!insn->alt_group)
			continue;
2794 if (op->dest.type == OP_DEST_PUSHF) {
2795 if (!state->uaccess_stack) {
2796 state->uaccess_stack = 1;
2797 } else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
2802 state->uaccess_stack <<= 1;
2803 state->uaccess_stack |= state->uaccess;
2806 if (op->src.type == OP_SRC_POPF) {
2807 if (state->uaccess_stack) {
2808 state->uaccess = state->uaccess_stack & 1;
2809 state->uaccess_stack >>= 1;
2810 if (state->uaccess_stack == 1)
2811 state->uaccess_stack = 0;
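		/*
		 * Worked example (illustrative) of the uaccess_stack encoding:
		 * the word is a bit stack with an implicit sentinel.  Starting
		 * from 0:
		 *
		 *	PUSHF #1: stack = (1 << 1) | uaccess	# 1 marks non-empty
		 *	PUSHF #2: stack = (stack << 1) | uaccess
		 *	POPF  #2: uaccess = stack & 1; stack >>= 1
		 *	POPF  #1: uaccess = stack & 1; stack >>= 1;
		 *		  stack == 1 is treated as empty and reset to 0
		 */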
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;
	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}
2829 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
2831 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
2832 insn->sec, insn->offset,
2833 cfi1->cfa.base, cfi1->cfa.offset,
2834 cfi2->cfa.base, cfi2->cfa.offset);
2836 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2837 for (i = 0; i < CFI_NUM_REGS; i++) {
2838 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;
2842 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
2843 insn->sec, insn->offset,
2844 i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
2849 } else if (cfi1->type != cfi2->type) {
2851 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2852 insn->sec, insn->offset, cfi1->type, cfi2->type);
2854 } else if (cfi1->drap != cfi2->drap ||
2855 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
2856 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
	} else
		return true;

	return false;
2869 static inline bool func_uaccess_safe(struct symbol *func)
	if (func)
		return func->uaccess_safe;

	return false;
2877 static inline const char *call_dest_name(struct instruction *insn)
	if (insn->call_dest)
		return insn->call_dest->name;

	return "{dynamic}";
2885 static inline bool noinstr_call_dest(struct symbol *func)
	/* We can't deal with indirect function calls at present; assume they're instrumented. */
	if (!func)
		return false;

	/* If the symbol is from a noinstr section, we're good. */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(): they only happen when
	 * something 'BAD' happened.  At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}
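/*
 * Aside (illustrative, not from this file): functions land in .noinstr.text
 * via the noinstr attribute, e.g.:
 *
 *	noinstr void do_machine_check(struct pt_regs *regs) { ... }
 *
 * A call from such a function into ordinary .text trips the
 * "leaves .noinstr.text section" warning in validate_call() below.
 */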
static int validate_call(struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}
	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}
	if (state->df) {
		WARN_FUNC("call to %s() with DF set", insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}
	return 0;
}
static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(insn, state);
}
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}
	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}
	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}
	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}
	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}
	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}
	return 0;
}
2987 static struct instruction *next_insn_to_validate(struct objtool_file *file,
2988 struct instruction *insn)
2990 struct alt_group *alt_group = insn->alt_group;
	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
2997 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
2998 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3000 return next_insn_same_sec(file, insn);
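/*
 * Illustrative layout (hypothetical groups): given an original group
 * orig = [o1 o2 o3] and its replacement alt = [a1 a2], reaching a2
 * (alt->last_insn) continues validation at the instruction after o3
 * (orig_group->last_insn), exactly as if the alternative bytes had been
 * patched over the original ones.
 */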
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
3009 static int validate_branch(struct objtool_file *file, struct symbol *func,
3010 struct instruction *insn, struct insn_state state)
3012 struct alternative *alt;
3013 struct instruction *next_insn, *prev_insn = NULL;
3014 struct section *sec;
3021 next_insn = next_insn_to_validate(file, insn);
3023 if (file->c_file && func && insn->func && func != insn->func->pfunc) {
		WARN("%s() falls through to next function %s()",
		     func->name, insn->func->name);
		return 1;
	}
	if (func && insn->ignore) {
		WARN_FUNC("BUG: why am I validating an ignored function?",
			  sec, insn->offset);
		return 1;
	}
	visited = VISITED_BRANCH << state.uaccess;
	if (insn->visited & VISITED_BRANCH_MASK) {
		if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
			return 1;

		if (insn->visited & visited)
			return 0;
	} else {
		nr_insns_visited++;
	}
	if (state.noinstr)
		state.instr += insn->instr;
3050 if (insn->restore) {
3051 struct instruction *save_insn, *i;
3056 sym_for_each_insn_continue_reverse(file, func, i) {
			WARN_FUNC("no corresponding CFI save for CFI restore",
				  sec, insn->offset);
3069 if (!save_insn->visited) {
			WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
				  sec, insn->offset);
3075 insn->cfi = save_insn->cfi;
3079 state.cfi = *insn->cfi;
3081 /* XXX track if we actually changed state.cfi */
		if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
			insn->cfi = prev_insn->cfi;
			nr_cfi_reused++;
		} else {
			insn->cfi = cfi_hash_find_or_add(&state.cfi);
		}
3091 insn->visited |= visited;
	if (propagate_alt_cfi(file, insn))
		return 1;
3096 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3097 bool skip_orig = false;
3099 list_for_each_entry(alt, &insn->alts, list) {
			ret = validate_branch(file, func, alt->insn, state);
			if (ret) {
				if (backtrace)
					BT_FUNC("(alt)", insn);
				return ret;
			}
	if (handle_insn_ops(insn, next_insn, &state))
		return 1;
3118 switch (insn->type) {
	case INSN_RETURN:
		if (sls && !insn->retpoline_safe &&
		    next_insn && next_insn->type != INSN_TRAP) {
			WARN_FUNC("missing int3 after ret",
				  insn->sec, insn->offset);
		}
3126 return validate_return(func, insn, &state);
	case INSN_CALL_DYNAMIC:
		ret = validate_call(insn, &state);
		if (ret)
			return ret;
		if (!no_fp && func && !is_fentry_call(insn) &&
		    !has_valid_stack_frame(&state)) {
			WARN_FUNC("call without frame pointer save/setup", sec, insn->offset);
			return 1;
		}
		if (dead_end_function(file, insn->call_dest))
			return 0;
		break;
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(insn, &state);
			if (ret)
				return ret;
		} else if (insn->jump_dest) {
			ret = validate_branch(file, func,
					      insn->jump_dest, state);
			if (ret) {
				if (backtrace)
					BT_FUNC("(branch)", insn);
				return ret;
			}
		}
		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;
		break;

	case INSN_JUMP_DYNAMIC:
		if (sls && !insn->retpoline_safe &&
		    next_insn && next_insn->type != INSN_TRAP) {
			WARN_FUNC("missing int3 after indirect jump",
				  insn->sec, insn->offset);
		}
		/* fallthrough */
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
		if (is_sibling_call(insn)) {
			ret = validate_sibling_call(insn, &state);
			if (ret)
				return ret;
		}
		if (insn->type == INSN_JUMP_DYNAMIC)
			return 0;
		break;
	case INSN_CONTEXT_SWITCH:
		if (func && (!next_insn || !next_insn->hint)) {
			WARN_FUNC("unsupported instruction in callable function", sec, insn->offset);
			return 1;
		}
		return 0;
		if (state.uaccess) {
			WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
			return 1;
		}
		state.uaccess = true;
		break;
		if (!state.uaccess && func) {
			WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
			return 1;
		}
		if (func_uaccess_safe(func) && !state.uaccess_stack) {
			WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
			return 1;
		}
		state.uaccess = false;
		break;
3221 WARN_FUNC("recursive STD", sec, insn->offset);
3229 if (!state.df && func) {
3230 WARN_FUNC("redundant CLD", sec, insn->offset);
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
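/*
 * Note (illustrative): validate_branch() takes 'state' by value, so each
 * recursive call for a branch target explores its own copy of the unwind
 * state.  Paths that reconverge are then compared at the join point via
 * insn_cfi_match()/cficmp() rather than by sharing mutable state.
 */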
3258 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3260 struct instruction *insn;
3261 struct insn_state state;
3262 int ret, warnings = 0;
3267 init_insn_state(&state, sec);
	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}
3277 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}
		insn = list_next_entry(insn, list);
	}

	return warnings;
}
/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
 * before an actual RET instruction.
 */
3297 static int validate_entry(struct objtool_file *file, struct instruction *insn)
3299 struct instruction *next, *dest;
3300 int ret, warnings = 0;
3303 next = next_insn_to_validate(file, insn);
	if (insn->visited & VISITED_ENTRY)
		return 0;
3308 insn->visited |= VISITED_ENTRY;
3310 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3311 struct alternative *alt;
3312 bool skip_orig = false;
3314 list_for_each_entry(alt, &insn->alts, list) {
3318 ret = validate_entry(file, alt->insn);
3321 BT_FUNC("(alt)", insn);
3330 switch (insn->type) {
3332 case INSN_CALL_DYNAMIC:
3333 case INSN_JUMP_DYNAMIC:
3334 case INSN_JUMP_DYNAMIC_CONDITIONAL:
		WARN_FUNC("early indirect call", insn->sec, insn->offset);
		return 1;
3338 case INSN_JUMP_UNCONDITIONAL:
3339 case INSN_JUMP_CONDITIONAL:
3340 if (!is_sibling_call(insn)) {
3341 if (!insn->jump_dest) {
				WARN_FUNC("unresolved jump target after linking?!?",
					  insn->sec, insn->offset);
				return -1;
			}
3346 ret = validate_entry(file, insn->jump_dest);
3349 BT_FUNC("(branch%s)", insn,
3350 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
		if (insn->type == INSN_JUMP_UNCONDITIONAL)
			return 0;
		dest = find_insn(file, insn->call_dest->sec,
				 insn->call_dest->offset);
		if (!dest) {
			WARN("Unresolved function after linking!?: %s",
			     insn->call_dest->name);
			return -1;
		}
		ret = validate_entry(file, dest);
		if (ret) {
			if (backtrace)
				BT_FUNC("(call)", insn);
			return ret;
		}
		/* A non-error return means the call path saw UNTRAIN_RET; success. */
		return 0;
		WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
		return 1;
		if (insn->retpoline_safe)
			return 0;
		WARN_FUNC("the end!", insn->sec, insn->offset);
		return -1;
/*
 * Validate that all branches starting at 'insn->entry' encounter UNRET_END
 * before an actual RET instruction.
 */
3410 static int validate_unret(struct objtool_file *file)
3412 struct instruction *insn;
3413 int ret, warnings = 0;
3415 for_each_insn(file, insn) {
		ret = validate_entry(file, insn);
		if (ret < 0) {
			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
			return ret;
		}
3430 static int validate_retpoline(struct objtool_file *file)
3432 struct instruction *insn;
3435 for_each_insn(file, insn) {
3436 if (insn->type != INSN_JUMP_DYNAMIC &&
3437 insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;
		if (insn->retpoline_safe)
			continue;
		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late; they very much do need retpolines in their
		 * .init.text.
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;
		if (insn->type == INSN_RETURN) {
			if (rethunk) {
				WARN_FUNC("'naked' return found in RETHUNK build",
					  insn->sec, insn->offset);
			} else
				continue;
		} else {
			WARN_FUNC("indirect %s found in RETPOLINE build",
				  insn->sec, insn->offset,
				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		}
		warnings++;
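/*
 * Aside (illustrative; assumes x86): asm that must keep a bare indirect
 * jump/call or a naked return can be annotated with
 * ANNOTATE_RETPOLINE_SAFE / ANNOTATE_UNRET_SAFE, which is what sets the
 * insn->retpoline_safe flag tested above.
 */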
3471 static bool is_kasan_insn(struct instruction *insn)
3473 return (insn->type == INSN_CALL &&
3474 !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
3477 static bool is_ubsan_insn(struct instruction *insn)
3479 return (insn->type == INSN_CALL &&
3480 !strcmp(insn->call_dest->name,
3481 "__ubsan_handle_builtin_unreachable"));
3484 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
3487 struct instruction *prev_insn;
3489 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
3493 * Ignore any unused exceptions. This can happen when a whitelisted
3494 * function has an exception table entry.
3496 * Also ignore alternative replacement instructions. This can happen
3497 * when a whitelisted function uses one of the ALTERNATIVE macros.
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;
	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
3515 prev_insn = list_prev_entry(insn, list);
3516 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
3517 (insn->type == INSN_BUG ||
3518 (insn->type == INSN_JUMP_UNCONDITIONAL &&
	     insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;
	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {
		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;
		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}
			break;
		}
		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;
		insn = list_next_entry(insn, list);
	}

	return false;
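/*
 * Example (illustrative): with CONFIG_KASAN, a __noreturn tail such as
 *
 *	call   __asan_handle_no_return
 *	call   panic
 *	ud2				# unreachable, but expected
 *
 * is tolerated by the bounded five-instruction scan above.
 */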
3552 static int validate_symbol(struct objtool_file *file, struct section *sec,
3553 struct symbol *sym, struct insn_state *state)
	struct instruction *insn;
	int ret;
	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;
3566 insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;
3570 state->uaccess = sym->uaccess_safe;
3572 ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);

	return ret;
}
3578 static int validate_section(struct objtool_file *file, struct section *sec)
3580 struct insn_state state;
3581 struct symbol *func;
3584 list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;
3588 init_insn_state(&state, sec);
3589 set_func_state(&state.cfi);
		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}
3597 static int validate_vmlinux_functions(struct objtool_file *file)
3599 struct section *sec;
	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}
	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}
3617 static int validate_functions(struct objtool_file *file)
3619 struct section *sec;
3622 for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;
		warnings += validate_section(file, sec);
	}

	return warnings;
}
3632 static int validate_reachable_instructions(struct objtool_file *file)
3634 struct instruction *insn;
	if (file->ignore_unreachables)
		return 0;
3639 for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;
		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
3650 int check(struct objtool_file *file)
3652 int ret, warnings = 0;
3654 arch_initial_func_cfi_state(&initial_func_cfi);
3655 init_cfi_state(&init_cfi);
3656 init_cfi_state(&func_cfi);
3657 set_func_state(&func_cfi);
	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;
3662 cfi_hash_add(&init_cfi);
3663 cfi_hash_add(&func_cfi);
3665 ret = decode_sections(file);
	if (list_empty(&file->insn_list))
		goto out;
3674 if (vmlinux && !validate_dup) {
3675 ret = validate_vmlinux_functions(file);
3684 ret = validate_retpoline(file);
3690 ret = validate_functions(file);
3695 ret = validate_unwind_hints(file, NULL);
3702 * Must be after validate_branch() and friends, it plays
3703 * further games with insn->visited.
3705 ret = validate_unret(file);
3712 ret = validate_reachable_instructions(file);
3718 ret = create_static_call_sections(file);
3724 ret = create_retpoline_sites_sections(file);
3731 ret = create_return_sites_sections(file);
3738 ret = create_mcount_loc_sections(file);
	if (stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}
out:
	/*
	 * For now, don't fail the kernel build on fatal warnings.  These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}