#include <string.h>
#include <stdlib.h>
-#include "builtin.h"
-#include "cfi.h"
-#include "arch.h"
-#include "check.h"
-#include "special.h"
-#include "warn.h"
-#include "arch_elf.h"
+#include <arch/elf.h>
+#include <objtool/builtin.h>
+#include <objtool/cfi.h>
+#include <objtool/arch.h>
+#include <objtool/check.h>
+#include <objtool/special.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>
-#define FAKE_JUMP_OFFSET -1
-
struct alternative {
struct list_head list;
struct instruction *insn;
static bool is_sibling_call(struct instruction *insn)
{
+ /*
+ * Assume only ELF functions can make sibling calls. This ensures
+ * sibling call detection consistency between vmlinux.o and individual
+ * objects.
+ */
+ if (!insn->func)
+ return false;
+
/* An indirect jump is either a sibling call or a jump to a table. */
if (insn->type == INSN_JUMP_DYNAMIC)
return list_empty(&insn->alts);
- if (!is_static_jump(insn))
- return false;
-
/* add_jump_destinations() sets insn->call_dest for sibling calls. */
- return !!insn->call_dest;
+ return (is_static_jump(insn) && insn->call_dest);
}
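/*
 * For illustration (a sketch, symbol names hypothetical): a sibling
 * call is a tail call emitted as a jump, e.g.:
 *
 *	foo:
 *		...
 *		jmp	bar	# sibling call: control never returns to foo
 *
 * whereas "jmp 1f" inside foo is plain control flow, and an indirect
 * "jmp *%rax" carrying alternatives is a jump table dispatch rather
 * than a sibling call.
 */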
/*
"machine_real_restart",
"rewind_stack_do_exit",
"kunit_try_catch_throw",
+ "xen_start_kernel",
};
if (!func)
* not correctly determine insn->call_dest->sec (external symbols do
* not have a section).
*/
- if (vmlinux && sec)
+ if (vmlinux && noinstr && sec)
state->noinstr = sec->noinstr;
}
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
- WARN("static_call: can't find static_call_key symbol: %s", tmp);
- return -1;
+ if (!module) {
+ WARN("static_call: can't find static_call_key symbol: %s", tmp);
+ return -1;
+ }
+
+ /*
+ * For modules, the key might not be exported, which
+ * means the module can make static calls but isn't
+ * allowed to change them.
+ *
+ * In that case we temporarily set the key to be the
+ * trampoline address. This is fixed up in
+ * static_call_add_module().
+ */
+ key_sym = insn->call_dest;
}
free(key_name);
return 0;
}
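/*
 * A rough sketch of the kernel-side fixup this enables, assuming the
 * helpers in kernel/static_call.c (abbreviated, not the verbatim code).
 * An exported key points into data while a trampoline lives in text, so
 * static_call_add_module() can tell the two cases apart:
 *
 *	addr = (unsigned long)static_call_key(site);
 *	if (!kernel_text_address(addr))
 *		continue;               // a real key: nothing to fix up
 *	key = tramp_key_lookup(addr);   // objtool stored the trampoline
 */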
+static int create_mcount_loc_sections(struct objtool_file *file)
+{
+ struct section *sec, *reloc_sec;
+ struct reloc *reloc;
+ unsigned long *loc;
+ struct instruction *insn;
+ int idx;
+
+ sec = find_section_by_name(file->elf, "__mcount_loc");
+ if (sec) {
+ INIT_LIST_HEAD(&file->mcount_loc_list);
+ WARN("file already has __mcount_loc section, skipping");
+ return 0;
+ }
+
+ if (list_empty(&file->mcount_loc_list))
+ return 0;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
+ idx++;
+
+ sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
+ if (!sec)
+ return -1;
+
+ reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
+ if (!reloc_sec)
+ return -1;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {
+
+ loc = (unsigned long *)sec->data->d_buf + idx;
+ memset(loc, 0, sizeof(unsigned long));
+
+ reloc = malloc(sizeof(*reloc));
+ if (!reloc) {
+ perror("malloc");
+ return -1;
+ }
+ memset(reloc, 0, sizeof(*reloc));
+
+ if (insn->sec->sym) {
+ reloc->sym = insn->sec->sym;
+ reloc->addend = insn->offset;
+ } else {
+ reloc->sym = find_symbol_containing(insn->sec, insn->offset);
+
+ if (!reloc->sym) {
+ WARN("missing symbol for insn at offset 0x%lx\n",
+ insn->offset);
+ return -1;
+ }
+
+ reloc->addend = insn->offset - reloc->sym->offset;
+ }
+
+ reloc->type = R_X86_64_64;
+ reloc->offset = idx * sizeof(unsigned long);
+ reloc->sec = reloc_sec;
+ elf_add_reloc(file->elf, reloc);
+
+ idx++;
+ }
+
+ if (elf_rebuild_reloc_section(file->elf, reloc_sec))
+ return -1;
+
+ return 0;
+}
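/*
 * Consumer side, for context: at boot, ftrace_init() walks the entries
 * the linker collects between __start_mcount_loc and __stop_mcount_loc,
 * each of which is the address of a call site nop'd out below:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
 */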
+
/*
* Warnings shouldn't be reported for ignored functions.
*/
if (!is_static_jump(insn))
continue;
- if (insn->offset == FAKE_JUMP_OFFSET)
- continue;
-
reloc = find_reloc_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
+ insn->offset, insn->len);
if (!reloc) {
dest_sec = insn->sec;
dest_off = arch_jump_destination(insn);
} else if (reloc->sym->type == STT_SECTION) {
dest_sec = reloc->sym->sec;
dest_off = arch_dest_reloc_offset(reloc->addend);
- } else if (reloc->sym->sec->idx) {
- dest_sec = reloc->sym->sec;
- dest_off = reloc->sym->sym.st_value +
- arch_dest_reloc_offset(reloc->addend);
- } else if (strstr(reloc->sym->name, "_indirect_thunk_")) {
+ } else if (!strncmp(reloc->sym->name, "__x86_indirect_thunk_", 21) ||
+ !strncmp(reloc->sym->name, "__x86_retpoline_", 16)) {
/*
* Retpoline jumps are really dynamic jumps in
* disguise, so convert them accordingly.
insn->retpoline_safe = true;
continue;
- } else {
- /* external sibling call */
+ } else if (insn->func) {
+ /* internal or external sibling call (with reloc) */
insn->call_dest = reloc->sym;
if (insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
continue;
+ } else if (reloc->sym->sec->idx) {
+ dest_sec = reloc->sym->sec;
+ dest_off = reloc->sym->sym.st_value +
+ arch_dest_reloc_offset(reloc->addend);
+ } else {
+ /* non-func asm code jumping to another file */
+ continue;
}
insn->jump_dest = find_insn(file, dest_sec, dest_off);
* case where the parent function's only reference to a
* subfunction is through a jump table.
*/
- if (!strstr(insn->func->name, ".cold.") &&
- strstr(insn->jump_dest->func->name, ".cold.")) {
+ if (!strstr(insn->func->name, ".cold") &&
+ strstr(insn->jump_dest->func->name, ".cold")) {
insn->func->cfunc = insn->jump_dest->func;
insn->jump_dest->func->pfunc = insn->func;
} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
insn->jump_dest->offset == insn->jump_dest->func->offset) {
- /* internal sibling call */
+ /* internal sibling call (without reloc) */
insn->call_dest = insn->jump_dest->func;
if (insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
insn->type = INSN_NOP;
}
+ if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) {
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+
+ elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ arch_nop_insn(insn->len));
+
+ insn->type = INSN_NOP;
+
+ list_add_tail(&insn->mcount_loc_node,
+ &file->mcount_loc_list);
+ }
+
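/*
 * Net effect on each call site, sketched for x86-64 encodings:
 *
 *	before:	e8 xx xx xx xx	call __fentry__ (with its reloc)
 *	after:	0f 1f 44 00 00	5-byte nop; reloc type set to R_NONE and
 *				the site queued for __mcount_loc above
 */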
/*
* Whatever stack impact regular CALLs have, should be undone
* by the RETURN of the called function.
}
/*
- * The .alternatives section requires some extra special care, over and above
- * what other special sections require:
- *
- * 1. Because alternatives are patched in-place, we need to insert a fake jump
- * instruction at the end so that validate_branch() skips all the original
- * replaced instructions when validating the new instruction path.
- *
- * 2. An added wrinkle is that the new instruction length might be zero. In
- * that case the old instructions are replaced with noops. We simulate that
- * by creating a fake jump as the only new instruction.
- *
- * 3. In some cases, the alternative section includes an instruction which
- * conditionally jumps to the _end_ of the entry. We have to modify these
- * jumps' destinations to point back to .text rather than the end of the
- * entry in .altinstr_replacement.
+ * The .alternatives section requires some extra special care over and above
+ * other special sections because alternatives are patched in place.
*/
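/*
 * The bookkeeping below hangs off struct alt_group, roughly (see
 * objtool's check.h for the authoritative declaration):
 *
 *	struct alt_group {
 *		struct alt_group *orig_group;	// NULL if this IS the original
 *		struct instruction *first_insn, *last_insn;
 *		struct cfi_state **cfi;		// shared, indexed by byte offset
 *	};
 */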
static int handle_group_alt(struct objtool_file *file,
struct special_alt *special_alt,
struct instruction *orig_insn,
struct instruction **new_insn)
{
- static unsigned int alt_group_next_index = 1;
- struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
- unsigned int alt_group = alt_group_next_index++;
+ struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
+ struct alt_group *orig_alt_group, *new_alt_group;
unsigned long dest_off;
+
+ orig_alt_group = malloc(sizeof(*orig_alt_group));
+ if (!orig_alt_group) {
+ WARN("malloc failed");
+ return -1;
+ }
+ orig_alt_group->cfi = calloc(special_alt->orig_len,
+ sizeof(struct cfi_state *));
+ if (!orig_alt_group->cfi) {
+ WARN("calloc failed");
+ return -1;
+ }
+
last_orig_insn = NULL;
insn = orig_insn;
sec_for_each_insn_from(file, insn) {
if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
break;
- insn->alt_group = alt_group;
+ insn->alt_group = orig_alt_group;
last_orig_insn = insn;
}
+ orig_alt_group->orig_group = NULL;
+ orig_alt_group->first_insn = orig_insn;
+ orig_alt_group->last_insn = last_orig_insn;
+
+ new_alt_group = malloc(sizeof(*new_alt_group));
+ if (!new_alt_group) {
+ WARN("malloc failed");
+ return -1;
+ }
- if (next_insn_same_sec(file, last_orig_insn)) {
- fake_jump = malloc(sizeof(*fake_jump));
- if (!fake_jump) {
+ if (special_alt->new_len < special_alt->orig_len) {
+ /*
+ * Insert a fake nop at the end to make the replacement
+ * alt_group the same size as the original. This is needed to
+ * allow propagate_alt_cfi() to do its magic. When the last
+ * instruction affects the stack, the instruction after it (the
+ * nop) will propagate the new state to the shared CFI array.
+ */
+ nop = malloc(sizeof(*nop));
+ if (!nop) {
WARN("malloc failed");
return -1;
}
- memset(fake_jump, 0, sizeof(*fake_jump));
- INIT_LIST_HEAD(&fake_jump->alts);
- INIT_LIST_HEAD(&fake_jump->stack_ops);
- init_cfi_state(&fake_jump->cfi);
+ memset(nop, 0, sizeof(*nop));
+ INIT_LIST_HEAD(&nop->alts);
+ INIT_LIST_HEAD(&nop->stack_ops);
+ init_cfi_state(&nop->cfi);
- fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = FAKE_JUMP_OFFSET;
- fake_jump->type = INSN_JUMP_UNCONDITIONAL;
- fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
- fake_jump->func = orig_insn->func;
+ nop->sec = special_alt->new_sec;
+ nop->offset = special_alt->new_off + special_alt->new_len;
+ nop->len = special_alt->orig_len - special_alt->new_len;
+ nop->type = INSN_NOP;
+ nop->func = orig_insn->func;
+ nop->alt_group = new_alt_group;
+ nop->ignore = orig_insn->ignore_alts;
}
if (!special_alt->new_len) {
- if (!fake_jump) {
- WARN("%s: empty alternative at end of section",
- special_alt->orig_sec->name);
- return -1;
- }
-
- *new_insn = fake_jump;
- return 0;
+ *new_insn = nop;
+ goto end;
}
- last_new_insn = NULL;
- alt_group = alt_group_next_index++;
insn = *new_insn;
sec_for_each_insn_from(file, insn) {
struct reloc *alt_reloc;
insn->ignore = orig_insn->ignore_alts;
insn->func = orig_insn->func;
- insn->alt_group = alt_group;
+ insn->alt_group = new_alt_group;
/*
* Since alternative replacement code is copy/pasted by the
continue;
dest_off = arch_jump_destination(insn);
- if (dest_off == special_alt->new_off + special_alt->new_len) {
- if (!fake_jump) {
- WARN("%s: alternative jump to end of section",
- special_alt->orig_sec->name);
- return -1;
- }
- insn->jump_dest = fake_jump;
- }
+ if (dest_off == special_alt->new_off + special_alt->new_len)
+ insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
if (!insn->jump_dest) {
WARN_FUNC("can't find alternative jump destination",
return -1;
}
- if (fake_jump)
- list_add(&fake_jump->list, &last_new_insn->list);
-
+ if (nop)
+ list_add(&nop->list, &last_new_insn->list);
+end:
+ new_alt_group->orig_group = orig_alt_group;
+ new_alt_group->first_insn = *new_insn;
+ new_alt_group->last_insn = nop ? : last_new_insn;
+ new_alt_group->cfi = orig_alt_group->cfi;
return 0;
}
return 0;
}
+static void set_func_state(struct cfi_state *state)
+{
+ state->cfa = initial_func_cfi.cfa;
+ memcpy(&state->regs, &initial_func_cfi.regs,
+ CFI_NUM_REGS * sizeof(struct cfi_reg));
+ state->stack_size = initial_func_cfi.cfa.offset;
+}
+
static int read_unwind_hints(struct objtool_file *file)
{
struct section *sec, *relocsec;
struct reloc *reloc;
struct unwind_hint *hint;
struct instruction *insn;
- struct cfi_reg *cfa;
int i;
sec = find_section_by_name(file->elf, ".discard.unwind_hints");
return -1;
}
- cfa = &insn->cfi.cfa;
+ insn->hint = true;
- if (hint->type == UNWIND_HINT_TYPE_RET_OFFSET) {
- insn->ret_offset = hint->sp_offset;
+ if (hint->type == UNWIND_HINT_TYPE_FUNC) {
+ set_func_state(&insn->cfi);
continue;
}
- insn->hint = true;
-
if (arch_decode_hint_reg(insn, hint->sp_reg)) {
WARN_FUNC("unsupported unwind_hint sp base reg %d",
insn->sec, insn->offset, hint->sp_reg);
return -1;
}
- cfa->offset = hint->sp_offset;
+ insn->cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
insn->cfi.type = hint->type;
insn->cfi.end = hint->end;
}
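/*
 * hint->sp_offset is read straight out of the object file and is thus
 * in target byte order.  Conceptually (a sketch with hypothetical
 * helpers; the real macro lives in <objtool/endianness.h>):
 *
 *	if (host_endianness() != elf_endianness(file->elf))
 *		val = bswap(val);
 */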
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
- u8 ret_offset = insn->ret_offset;
struct cfi_state *cfi = &state->cfi;
int i;
if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
return true;
- if (cfi->cfa.offset != initial_func_cfi.cfa.offset + ret_offset)
+ if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
return true;
- if (cfi->stack_size != initial_func_cfi.cfa.offset + ret_offset)
+ if (cfi->stack_size != initial_func_cfi.cfa.offset)
return true;
- /*
- * If there is a ret offset hint then don't check registers
- * because a callee-saved register might have been pushed on
- * the stack.
- */
- if (ret_offset)
- return false;
-
for (i = 0; i < CFI_NUM_REGS; i++) {
if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
return true;
}
+static bool check_reg_frame_pos(const struct cfi_reg *reg,
+ int expected_offset)
+{
+ return reg->base == CFI_CFA &&
+ reg->offset == expected_offset;
+}
+
static bool has_valid_stack_frame(struct insn_state *state)
{
struct cfi_state *cfi = &state->cfi;
- if (cfi->cfa.base == CFI_BP && cfi->regs[CFI_BP].base == CFI_CFA &&
- cfi->regs[CFI_BP].offset == -16)
+ if (cfi->cfa.base == CFI_BP &&
+ check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
+ check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
return true;
if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
case OP_SRC_REG:
if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
cfa->base == CFI_SP &&
- regs[CFI_BP].base == CFI_CFA &&
- regs[CFI_BP].offset == -cfa->offset) {
+ check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
/* mov %rsp, %rbp */
cfa->base = op->dest.reg;
cfa->offset = -cfi->vals[op->src.reg].offset;
cfi->stack_size = cfa->offset;
+ } else if (cfa->base == CFI_SP &&
+ cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
+ cfi->vals[op->src.reg].offset == cfa->offset) {
+
+ /*
+ * Stack swizzle:
+ *
+ * 1: mov %rsp, (%[tos])
+ * 2: mov %[tos], %rsp
+ * ...
+ * 3: pop %rsp
+ *
+ * Where:
+ *
+ * 1 - places a pointer to the previous
+ * stack at the Top-of-Stack of the
+ * new stack.
+ *
+ * 2 - switches to the new stack.
+ *
+ * 3 - pops the Top-of-Stack to restore
+ * the original stack.
+ *
+ * Note: we set base to SP_INDIRECT
+ * here and preserve offset. Therefore
+ * when the unwinder reaches ToS it
+ * will dereference SP and then add the
+ * offset to find the next frame, IOW:
+ * (%rsp) + offset.
+ */
+ cfa->base = CFI_SP_INDIRECT;
+
} else {
cfa->base = CFI_UNDEFINED;
cfa->offset = 0;
break;
}
+ if (!cfi->drap && op->src.reg == CFI_SP &&
+ op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
+ check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
+
+ /* lea disp(%rsp), %rbp */
+ cfa->base = CFI_BP;
+ cfa->offset -= op->src.offset;
+ cfi->bp_scratch = false;
+ break;
+ }
+
if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
/* drap: lea disp(%rsp), %drap */
case OP_SRC_POP:
case OP_SRC_POPF:
+ if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
+
+ /* pop %rsp; # restore from a stack swizzle */
+ cfa->base = CFI_SP;
+ break;
+ }
+
if (!cfi->drap && op->dest.reg == cfa->base) {
/* pop %rbp */
break;
case OP_SRC_REG_INDIRECT:
+ if (!cfi->drap && op->dest.reg == cfa->base &&
+ op->dest.reg == CFI_BP) {
+
+ /* mov disp(%rsp), %rbp */
+ cfa->base = CFI_SP;
+ cfa->offset = cfi->stack_size;
+ }
+
if (cfi->drap && op->src.reg == CFI_BP &&
op->src.offset == cfi->drap_offset) {
/* mov disp(%rbp), %reg */
/* mov disp(%rsp), %reg */
restore_reg(cfi, op->dest.reg);
+
+ } else if (op->src.reg == CFI_SP &&
+ op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
+
+ /* mov disp(%rsp), %reg */
+ restore_reg(cfi, op->dest.reg);
}
break;
/* mov reg, disp(%rsp) */
save_reg(cfi, op->src.reg, CFI_CFA,
op->dest.offset - cfi->cfa.offset);
+
+ } else if (op->dest.reg == CFI_SP) {
+
+ /* mov reg, disp(%rsp) */
+ save_reg(cfi, op->src.reg, CFI_CFA,
+ op->dest.offset - cfi->stack_size);
+
+ } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
+
+ /* mov %rsp, (%reg); # setup a stack swizzle. */
+ cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
+ cfi->vals[op->dest.reg].offset = cfa->offset;
}
break;
return 0;
}
-static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
+/*
+ * The stack layouts of alternative instructions can sometimes diverge when
+ * they have stack modifications. That's fine as long as the potential stack
+ * layouts don't conflict at any given potential instruction boundary.
+ *
+ * Flatten the CFIs of the different alternative code streams (both original
+ * and replacement) into a single shared CFI array which can be used to detect
+ * conflicts and nicely feed a linear array of ORC entries to the unwinder.
+ */
+static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
- struct stack_op *op;
+ struct cfi_state **alt_cfi;
+ int group_off;
- list_for_each_entry(op, &insn->stack_ops, list) {
- struct cfi_state old_cfi = state->cfi;
- int res;
+ if (!insn->alt_group)
+ return 0;
- res = update_cfi_state(insn, &state->cfi, op);
- if (res)
- return res;
+ alt_cfi = insn->alt_group->cfi;
+ group_off = insn->offset - insn->alt_group->first_insn->offset;
- if (insn->alt_group && memcmp(&state->cfi, &old_cfi, sizeof(struct cfi_state))) {
- WARN_FUNC("alternative modifies stack", insn->sec, insn->offset);
+ if (!alt_cfi[group_off]) {
+ alt_cfi[group_off] = &insn->cfi;
+ } else {
+ if (memcmp(alt_cfi[group_off], &insn->cfi, sizeof(struct cfi_state))) {
+ WARN_FUNC("stack layout conflict in alternatives",
+ insn->sec, insn->offset);
return -1;
}
+ }
+
+ return 0;
+}
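/*
 * Worked example: for an original group at .text+0x10 and its
 * replacement at .altinstr_replacement+0x40, the instruction at
 * .text+0x13 and the one at .altinstr_replacement+0x43 both compute
 * group_off == 3, so they share (and must agree on) cfi[3].
 */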
+
+static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
+{
+ struct stack_op *op;
+
+ list_for_each_entry(op, &insn->stack_ops, list) {
+
+ if (update_cfi_state(insn, &state->cfi, op))
+ return 1;
if (op->dest.type == OP_DEST_PUSHF) {
if (!state->uaccess_stack) {
return 0;
}
-/*
- * Alternatives should not contain any ORC entries, this in turn means they
- * should not contain any CFI ops, which implies all instructions should have
- * the same same CFI state.
- *
- * It is possible to constuct alternatives that have unreachable holes that go
- * unreported (because they're NOPs), such holes would result in CFI_UNDEFINED
- * states which then results in ORC entries, which we just said we didn't want.
- *
- * Avoid them by copying the CFI entry of the first instruction into the whole
- * alternative.
- */
-static void fill_alternative_cfi(struct objtool_file *file, struct instruction *insn)
+static struct instruction *next_insn_to_validate(struct objtool_file *file,
+ struct instruction *insn)
{
- struct instruction *first_insn = insn;
- int alt_group = insn->alt_group;
+ struct alt_group *alt_group = insn->alt_group;
- sec_for_each_insn_continue(file, insn) {
- if (insn->alt_group != alt_group)
- break;
- insn->cfi = first_insn->cfi;
- }
+ /*
+ * Simulate the fact that alternatives are patched in-place. When the
+ * end of a replacement alt_group is reached, redirect objtool flow to
+ * the end of the original alt_group.
+ */
+ if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
+ return next_insn_same_sec(file, alt_group->orig_group->last_insn);
+
+ return next_insn_same_sec(file, insn);
}
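/*
 * Example flow for an original group O1..O3 with replacement R1..R2:
 * validation visits R1 -> R2 -> (first insn after O3).  Once R2, the
 * replacement alt_group->last_insn, has been validated, flow resumes
 * after the original group, just as it would at runtime once the
 * alternative has been patched in.
 */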
/*
sec = insn->sec;
while (1) {
- next_insn = next_insn_same_sec(file, insn);
+ next_insn = next_insn_to_validate(file, insn);
if (file->c_file && func && insn->func && func != insn->func->pfunc) {
WARN("%s() falls through to next function %s()",
insn->visited |= visited;
+ if (propagate_alt_cfi(file, insn))
+ return 1;
+
if (!insn->ignore_alts && !list_empty(&insn->alts)) {
bool skip_orig = false;
}
}
- if (insn->alt_group)
- fill_alternative_cfi(file, insn);
-
if (skip_orig)
return 0;
}
case INSN_JUMP_CONDITIONAL:
case INSN_JUMP_UNCONDITIONAL:
- if (func && is_sibling_call(insn)) {
+ if (is_sibling_call(insn)) {
ret = validate_sibling_call(insn, &state);
if (ret)
return ret;
case INSN_JUMP_DYNAMIC:
case INSN_JUMP_DYNAMIC_CONDITIONAL:
- if (func && is_sibling_call(insn)) {
+ if (is_sibling_call(insn)) {
ret = validate_sibling_call(insn, &state);
if (ret)
return ret;
break;
case INSN_STD:
- if (state.df)
+ if (state.df) {
WARN_FUNC("recursive STD", sec, insn->offset);
+ return 1;
+ }
state.df = true;
break;
case INSN_CLD:
- if (!state.df && func)
+ if (!state.df && func) {
WARN_FUNC("redundant CLD", sec, insn->offset);
+ return 1;
+ }
state.df = false;
break;
!strcmp(insn->sec->name, ".altinstr_aux"))
return true;
- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->offset == FAKE_JUMP_OFFSET)
- return true;
-
if (!insn->func)
return false;
continue;
init_insn_state(&state, sec);
- state.cfi.cfa = initial_func_cfi.cfa;
- memcpy(&state.cfi.regs, &initial_func_cfi.regs,
- CFI_NUM_REGS * sizeof(struct cfi_reg));
- state.cfi.stack_size = initial_func_cfi.cfa.offset;
+ set_func_state(&state.cfi);
warnings += validate_symbol(file, sec, func, &state);
}
goto out;
warnings += ret;
+ if (mcount) {
+ ret = create_mcount_loc_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
out:
- if (ret < 0) {
- /*
- * Fatal error. The binary is corrupt or otherwise broken in
- * some way, or objtool itself is broken. Fail the kernel
- * build.
- */
- return ret;
- }
-
+ /*
+ * For now, don't fail the kernel build on fatal warnings. These
+ * errors are still fairly common due to the growing matrix of
+ * supported toolchains and their recent pace of change.
+ */
return 0;
}