#include "basic-block.h"
#include "regs.h"
#include "flags.h"
-#include "output.h"
#include "function.h"
#include "except.h"
#include "rtl-error.h"
#include "obstack.h"
#include "insn-attr.h"
#include "insn-config.h"
-#include "cfglayout.h"
#include "expr.h"
#include "target.h"
#include "common/common-target.h"
#include "tree-pass.h"
#include "df.h"
+/* Holds the interesting leading and trailing notes for the function.
+ Only applicable if the CFG is in cfglayout mode. */
+static GTY(()) rtx cfg_layout_function_footer;
+static GTY(()) rtx cfg_layout_function_header;
+
+static rtx skip_insns_after_block (basic_block);
+static void record_effective_endpoints (void);
+static rtx label_for_bb (basic_block);
+static void fixup_reorder_chain (void);
+
+void verify_insn_chain (void);
+static void fixup_fallthru_exit_predecessor (void);
static int can_delete_note_p (const_rtx);
static int can_delete_label_p (const_rtx);
static basic_block rtl_split_edge (edge);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
-static void rtl_dump_bb (basic_block, FILE *, int, int);
+static void rtl_dump_bb (FILE *, basic_block, int, int);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
\f
&& !in_expr_list_p (forced_labels, label));
}
-/* Delete INSN by patching it out. Return the next insn. */
+/* Delete INSN by patching it out. */
-rtx
+void
delete_insn (rtx insn)
{
- rtx next = NEXT_INSN (insn);
rtx note;
bool really_delete = true;
if (! can_delete_label_p (insn))
{
const char *name = LABEL_NAME (insn);
+ basic_block bb = BLOCK_FOR_INSN (insn);
+ rtx bb_note = NEXT_INSN (insn);
really_delete = false;
PUT_CODE (insn, NOTE);
NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
NOTE_DELETED_LABEL_NAME (insn) = name;
+
+ if (bb_note != NULL_RTX && NOTE_INSN_BASIC_BLOCK_P (bb_note)
+ && BLOCK_FOR_INSN (bb_note) == bb)
+ {
+ reorder_insns_nobb (insn, insn, bb_note);
+ BB_HEAD (bb) = bb_note;
+ if (BB_END (bb) == bb_note)
+ BB_END (bb) = insn;
+ }
}
remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
LABEL_NUSES (label)--;
}
}
-
- return next;
}
/* Like delete_insn but also purge dead edges from BB. */
-rtx
+void
delete_insn_and_edges (rtx insn)
{
- rtx x;
bool purge = false;
if (INSN_P (insn)
&& BLOCK_FOR_INSN (insn)
&& BB_END (BLOCK_FOR_INSN (insn)) == insn)
purge = true;
- x = delete_insn (insn);
+ delete_insn (insn);
if (purge)
purge_dead_edges (BLOCK_FOR_INSN (insn));
- return x;
}
/* Unlink a chain of insns between START and FINISH, leaving notes
void
delete_insn_chain (rtx start, rtx finish, bool clear_bb)
{
- rtx next;
+ rtx prev, current;
/* Unchain the insns one by one. It would be quicker to delete all of these
with a single unchaining, rather than one at a time, but we need to keep
the NOTE's. */
+ current = finish;
while (1)
{
- next = NEXT_INSN (start);
- if (NOTE_P (start) && !can_delete_note_p (start))
+ prev = PREV_INSN (current);
+ if (NOTE_P (current) && !can_delete_note_p (current))
;
else
- next = delete_insn (start);
+ delete_insn (current);
- if (clear_bb && !INSN_DELETED_P (start))
- set_block_for_insn (start, NULL);
+ if (clear_bb && !INSN_DELETED_P (current))
+ set_block_for_insn (current, NULL);
- if (start == finish)
+ if (current == start)
break;
- start = next;
+ current = prev;
}
}
\f
{
RTL_PASS,
"*free_cfg", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
NULL, /* gate */
rest_of_pass_free_cfg, /* execute */
NULL, /* sub */
}
\f
+/* Like active_insn_p, except keep the return value clobber around
+   even after reload.  Callers guarantee INSN_P (insn) (see
+   contains_no_active_insn_p), so PATTERN is always valid here. */
+
+static bool
+flow_active_insn_p (const_rtx insn)
+{
+  if (active_insn_p (insn))
+    return true;
+
+  /* A clobber of the function return value exists for buggy
+     programs that fail to return a value. Its effect is to
+     keep the return value from being live across the entire
+     function. If we allow it to be skipped, we introduce the
+     possibility for register lifetime confusion. */
+  if (GET_CODE (PATTERN (insn)) == CLOBBER
+      && REG_P (XEXP (PATTERN (insn), 0))
+      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
+    return true;
+
+  return false;
+}
+
+/* Return true if the block has no effect and only forwards control flow to
+   its single destination. */
+
+bool
+contains_no_active_insn_p (const_basic_block bb)
+{
+  rtx insn;
+
+  /* The entry/exit blocks and any block with multiple successors
+     cannot be pure forwarders. */
+  if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
+      || !single_succ_p (bb))
+    return false;
+
+  /* Every insn before the last one must be inactive. */
+  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
+    if (INSN_P (insn) && flow_active_insn_p (insn))
+      return false;
+
+  /* The final insn may be a simple unconditional jump (the forwarding
+     itself) or anything inactive; any other active insn disqualifies BB. */
+  return (!INSN_P (insn)
+	  || (JUMP_P (insn) && simplejump_p (insn))
+	  || !flow_active_insn_p (insn));
+}
+
+/* Likewise, but protect loop latches, headers and preheaders. */
+/* FIXME: Make this a cfg hook. */
+
+bool
+forwarder_block_p (const_basic_block bb)
+{
+  if (!contains_no_active_insn_p (bb))
+    return false;
+
+  /* Protect loop latches, headers and preheaders. */
+  if (current_loops)
+    {
+      basic_block dest;
+      /* BB itself is a loop header. */
+      if (bb->loop_father->header == bb)
+	return false;
+      /* BB's single destination is a header, i.e. BB is a preheader
+	 or a latch of that loop. */
+      dest = EDGE_SUCC (bb, 0)->dest;
+      if (dest->loop_father->header == dest)
+	return false;
+    }
+
+  return true;
+}
+
+/* Return true if we can reach TARGET from SRC by falling through. */
+/* FIXME: Make this a cfg hook. */
+
+bool
+can_fallthru (basic_block src, basic_block target)
+{
+  rtx insn = BB_END (src);
+  rtx insn2;
+  edge e;
+  edge_iterator ei;
+
+  /* A fallthru into the exit block is always possible. */
+  if (target == EXIT_BLOCK_PTR)
+    return true;
+  /* TARGET must immediately follow SRC in the block chain. */
+  if (src->next_bb != target)
+    return false;
+  /* An existing fallthru edge to the exit block means SRC's fallthru
+     slot is already taken. */
+  FOR_EACH_EDGE (e, ei, src->succs)
+    if (e->dest == EXIT_BLOCK_PTR
+	&& e->flags & EDGE_FALLTHRU)
+      return false;
+
+  insn2 = BB_HEAD (target);
+  if (insn2 && !active_insn_p (insn2))
+    insn2 = next_active_insn (insn2);
+
+  /* ??? Later we may add code to move jump tables offline. */
+  return next_active_insn (insn) == insn2;
+}
+
+/* Return true if we could reach TARGET from SRC by falling through,
+   if the target was made adjacent. If we already have a fall-through
+   edge to the exit block, we can't do that. */
+
+static bool
+could_fall_through (basic_block src, basic_block target)
+{
+  edge e;
+  edge_iterator ei;
+
+  if (target == EXIT_BLOCK_PTR)
+    return true;
+  /* A fallthru edge to the exit block cannot be redirected, so it
+     precludes making TARGET the fallthru destination. */
+  FOR_EACH_EDGE (e, ei, src->succs)
+    if (e->dest == EXIT_BLOCK_PTR
+	&& e->flags & EDGE_FALLTHRU)
+      return false;
+  return true;
+}
+\f
/* Return the NOTE_INSN_BASIC_BLOCK of BB. */
rtx
bb_note (basic_block bb)
return new_bb;
}
+/* Return true if the single edge between blocks A and B is the only place
+   in RTL which holds some unique locus. */
+
+static bool
+unique_locus_on_edge_between_p (basic_block a, basic_block b)
+{
+  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
+  rtx insn, end;
+
+  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
+    return false;
+
+  /* First scan block A backward.  END is one insn before A's head,
+     so the loop terminates correctly even for an "empty" block. */
+  insn = BB_END (a);
+  end = PREV_INSN (BB_HEAD (a));
+  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
+    insn = PREV_INSN (insn);
+
+  /* The last located insn in A already carries the locus. */
+  if (insn != end && INSN_LOCATION (insn) == goto_locus)
+    return false;
+
+  /* Then scan block B forward. */
+  insn = BB_HEAD (b);
+  if (insn)
+    {
+      end = NEXT_INSN (BB_END (b));
+      while (insn != end && !NONDEBUG_INSN_P (insn))
+	insn = NEXT_INSN (insn);
+
+      /* The first located insn in B already carries the locus. */
+      if (insn != end && INSN_HAS_LOCATION (insn)
+	  && INSN_LOCATION (insn) == goto_locus)
+	return false;
+    }
+
+  /* The locus appears nowhere in A or B; it lives only on the edge. */
+  return true;
+}
+
+/* If the single edge between blocks A and B is the only place in RTL which
+   holds some unique locus, emit a nop with that locus between the blocks. */
+
+static void
+emit_nop_for_unique_locus_between (basic_block a, basic_block b)
+{
+  if (!unique_locus_on_edge_between_p (a, b))
+    return;
+
+  /* emit_insn_after_noloc returns the new nop, which becomes A's new
+     end; then tag it with the edge's goto_locus so the location is
+     preserved in the insn stream. */
+  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
+  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
+}
+
/* Blocks A and B are to be merged into a single block A. The insns
are already contiguous. */
/* Delete everything marked above as well as crap that might be
hanging out between the two blocks. */
- BB_HEAD (b) = NULL;
+ BB_END (a) = a_end;
+ BB_HEAD (b) = b_empty ? NULL_RTX : b_head;
delete_insn_chain (del_first, del_last, true);
+ /* When not optimizing CFG and the edge is the only place in RTL which holds
+ some unique locus, emit a nop with that locus in between. */
+ if (!optimize)
+ {
+ emit_nop_for_unique_locus_between (a, b);
+ a_end = BB_END (a);
+ }
+
/* Reassociate the insns of B with A. */
if (!b_empty)
{
update_bb_for_insn_chain (a_end, b_debug_end, a);
- a_end = b_debug_end;
+ BB_END (a) = b_debug_end;
+ BB_HEAD (b) = NULL_RTX;
}
else if (b_end != b_debug_end)
{
reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
b_debug_end);
update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
- a_end = b_debug_end;
+ BB_END (a) = b_debug_end;
}
df_bb_delete (b->index);
- BB_END (a) = a_end;
/* If B was a forwarder block, propagate the locus on the edge. */
if (forwarder_p && !EDGE_SUCC (b, 0)->goto_locus)
if (BB_PARTITION (a) != BB_PARTITION (b))
return false;
+ /* Protect the loop latches. */
+ if (current_loops && b->loop_father->latch == b)
+ return false;
+
/* There must be exactly one edge in between the blocks. */
return (single_succ_p (a)
&& single_succ (a) == b
/* Selectively unlink whole insn chain. */
if (in_cfglayout)
{
- rtx insn = src->il.rtl->footer;
+ rtx insn = BB_FOOTER (src);
delete_insn_chain (kill_from, BB_END (src), false);
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
- src->il.rtl->footer = NEXT_INSN (insn);
+ BB_FOOTER (src) = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
one and create separate abnormal edge to original destination.
This allows bb-reorder to make such edge non-fallthru. */
gcc_assert (e->dest == target);
- abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
- e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU;
+ abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
+ e->flags &= EDGE_FALLTHRU;
}
else
{
}
/* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
- don't point to target label. */
+ don't point to the target or fallthru label. */
if (JUMP_P (BB_END (e->src))
&& target != EXIT_BLOCK_PTR
- && e->dest == target
&& (e->flags & EDGE_FALLTHRU)
&& (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
{
int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
for (i = 0; i < n; ++i)
- if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
- {
+ {
+ if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
+ XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
+ if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
asm_goto_edge = true;
- break;
- }
+ }
}
if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
jump_block = create_basic_block (note, NULL, e->src);
jump_block->count = count;
jump_block->frequency = EDGE_FREQUENCY (e);
- jump_block->loop_depth = target->loop_depth;
/* Make sure new block ends up in correct hot/cold section. */
else
jump_block = e->src;
- if (e->goto_locus && e->goto_block == NULL)
- loc = e->goto_locus;
- else
- loc = 0;
+ loc = e->goto_locus;
e->flags &= ~EDGE_FALLTHRU;
if (target == EXIT_BLOCK_PTR)
{
\f
/* Print out RTL-specific basic block information (live information
- at start and end). */
+ at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
+ documented in dumpfile.h. */
static void
-rtl_dump_bb (basic_block bb, FILE *outf, int indent, int flags ATTRIBUTE_UNUSED)
+rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
{
rtx insn;
rtx last;
memset (s_indent, ' ', (size_t) indent);
s_indent[indent] = '\0';
- if (df)
+ if (df && (flags & TDF_DETAILS))
{
df_dump_top (bb, outf);
putc ('\n', outf);
if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
insn = NEXT_INSN (insn))
- print_rtl_single (outf, insn);
+ {
+ if (flags & TDF_DETAILS)
+ df_dump_insn_top (insn, outf);
+ if (! (flags & TDF_SLIM))
+ print_rtl_single (outf, insn);
+ else
+ dump_insn_slim (outf, insn);
+ if (flags & TDF_DETAILS)
+ df_dump_insn_bottom (insn, outf);
+ }
- if (df)
+ if (df && (flags & TDF_DETAILS))
{
df_dump_bottom (bb, outf);
putc ('\n', outf);
}
\f
-/* Like print_rtl, but also print out live information for the start of each
- basic block. */
+/* Like dump_function_to_file, but for RTL. Print out dataflow information
+ for the start of each basic block. FLAGS are the TDF_* masks documented
+ in dumpfile.h. */
void
-print_rtl_with_bb (FILE *outf, const_rtx rtx_first)
+print_rtl_with_bb (FILE *outf, const_rtx rtx_first, int flags)
{
const_rtx tmp_rtx;
if (rtx_first == 0)
basic_block *start = XCNEWVEC (basic_block, max_uid);
basic_block *end = XCNEWVEC (basic_block, max_uid);
enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
-
basic_block bb;
+ /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
+ insns, but the CFG is not maintained so the basic block info
+ is not reliable. Therefore it's omitted from the dumps. */
+ if (! (cfun->curr_properties & PROP_cfg))
+ flags &= ~TDF_BLOCKS;
+
if (df)
df_dump_start (outf);
- FOR_EACH_BB_REVERSE (bb)
+ if (flags & TDF_BLOCKS)
{
- rtx x;
-
- start[INSN_UID (BB_HEAD (bb))] = bb;
- end[INSN_UID (BB_END (bb))] = bb;
- for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
+ FOR_EACH_BB_REVERSE (bb)
{
- enum bb_state state = IN_MULTIPLE_BB;
+ rtx x;
+
+ start[INSN_UID (BB_HEAD (bb))] = bb;
+ end[INSN_UID (BB_END (bb))] = bb;
+ for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
+ {
+ enum bb_state state = IN_MULTIPLE_BB;
- if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
- state = IN_ONE_BB;
- in_bb_p[INSN_UID (x)] = state;
+ if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
+ state = IN_ONE_BB;
+ in_bb_p[INSN_UID (x)] = state;
- if (x == BB_END (bb))
- break;
+ if (x == BB_END (bb))
+ break;
+ }
}
}
for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
{
- int did_output;
-
- bb = start[INSN_UID (tmp_rtx)];
- if (bb != NULL)
- dump_bb_info (bb, true, false, dump_flags, ";; ", outf);
+ if (flags & TDF_BLOCKS)
+ {
+ bb = start[INSN_UID (tmp_rtx)];
+ if (bb != NULL)
+ {
+ dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
+ if (df && (flags & TDF_DETAILS))
+ df_dump_top (bb, outf);
+ }
- if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
- && !NOTE_P (tmp_rtx)
- && !BARRIER_P (tmp_rtx))
- fprintf (outf, ";; Insn is not within a basic block\n");
- else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
- fprintf (outf, ";; Insn is in multiple basic blocks\n");
+ if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
+ && !NOTE_P (tmp_rtx)
+ && !BARRIER_P (tmp_rtx))
+ fprintf (outf, ";; Insn is not within a basic block\n");
+ else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
+ fprintf (outf, ";; Insn is in multiple basic blocks\n");
+ }
- did_output = print_rtl_single (outf, tmp_rtx);
+ if (flags & TDF_DETAILS)
+ df_dump_insn_top (tmp_rtx, outf);
+ if (! (flags & TDF_SLIM))
+ print_rtl_single (outf, tmp_rtx);
+ else
+ dump_insn_slim (outf, tmp_rtx);
+ if (flags & TDF_DETAILS)
+ df_dump_insn_bottom (tmp_rtx, outf);
- bb = end[INSN_UID (tmp_rtx)];
- if (bb != NULL)
- dump_bb_info (bb, false, true, dump_flags, ";; ", outf);
- if (did_output)
- putc ('\n', outf);
+ if (flags & TDF_BLOCKS)
+ {
+ bb = end[INSN_UID (tmp_rtx)];
+ if (bb != NULL)
+ {
+ dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
+ if (df && (flags & TDF_DETAILS))
+ df_dump_bottom (bb, outf);
+ putc ('\n', outf);
+ }
+ }
}
free (start);
}
}
\f
+/* Update the branch probability of BB if a REG_BR_PROB is present. */
+
void
update_br_prob_note (basic_block bb)
{
err = 1;
}
- for (insn = bb->il.rtl->header; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
if (!BARRIER_P (insn)
&& BLOCK_FOR_INSN (insn) != NULL)
{
INSN_UID (insn), bb->index);
err = 1;
}
- for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
if (!BARRIER_P (insn)
&& BLOCK_FOR_INSN (insn) != NULL)
{
return inserted;
}
+\f
+/* Cut the insns from FIRST to LAST out of the insns stream. */
-/* Same as split_block but update cfg_layout structures. */
-
-static basic_block
-cfg_layout_split_block (basic_block bb, void *insnp)
+rtx
+unlink_insn_chain (rtx first, rtx last)
{
- rtx insn = (rtx) insnp;
- basic_block new_bb = rtl_split_block (bb, insn);
-
- new_bb->il.rtl->footer = bb->il.rtl->footer;
- bb->il.rtl->footer = NULL;
-
- return new_bb;
+ rtx prevfirst = PREV_INSN (first);
+ rtx nextlast = NEXT_INSN (last);
+
+ PREV_INSN (first) = NULL;
+ NEXT_INSN (last) = NULL;
+ if (prevfirst)
+ NEXT_INSN (prevfirst) = nextlast;
+ if (nextlast)
+ PREV_INSN (nextlast) = prevfirst;
+ else
+ set_last_insn (prevfirst);
+ if (!prevfirst)
+ set_first_insn (nextlast);
+ return first;
}
+\f
+/* Skip over inter-block insns occurring after BB which are typically
+ associated with BB (e.g., barriers). If there are any such insns,
+ we return the last one. Otherwise, we return the end of BB. */
-/* Redirect Edge to DEST. */
-static edge
-cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
+static rtx
+skip_insns_after_block (basic_block bb)
{
- basic_block src = e->src;
- edge ret;
-
- if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
- return NULL;
+ rtx insn, last_insn, next_head, prev;
- if (e->dest == dest)
- return e;
-
- if (e->src != ENTRY_BLOCK_PTR
- && (ret = try_redirect_by_replacing_jump (e, dest, true)))
- {
- df_set_bb_dirty (src);
- return ret;
- }
+ next_head = NULL_RTX;
+ if (bb->next_bb != EXIT_BLOCK_PTR)
+ next_head = BB_HEAD (bb->next_bb);
- if (e->src == ENTRY_BLOCK_PTR
- && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
+ for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
{
- if (dump_file)
- fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
- e->src->index, dest->index);
+ if (insn == next_head)
+ break;
- df_set_bb_dirty (e->src);
- redirect_edge_succ (e, dest);
- return e;
- }
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ last_insn = insn;
+ continue;
- /* Redirect_edge_and_branch may decide to turn branch into fallthru edge
- in the case the basic block appears to be in sequence. Avoid this
- transformation. */
+ case NOTE:
+ switch (NOTE_KIND (insn))
+ {
+ case NOTE_INSN_BLOCK_END:
+ gcc_unreachable ();
+ continue;
+ default:
+ continue;
+ break;
+ }
+ break;
- if (e->flags & EDGE_FALLTHRU)
- {
- /* Redirect any branch edges unified with the fallthru one. */
- if (JUMP_P (BB_END (src))
- && label_is_jump_target_p (BB_HEAD (e->dest),
- BB_END (src)))
- {
- edge redirected;
+ case CODE_LABEL:
+ if (NEXT_INSN (insn)
+ && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
+ {
+ insn = NEXT_INSN (insn);
+ last_insn = insn;
+ continue;
+ }
+ break;
- if (dump_file)
- fprintf (dump_file, "Fallthru edge unified with branch "
- "%i->%i redirected to %i\n",
- e->src->index, e->dest->index, dest->index);
- e->flags &= ~EDGE_FALLTHRU;
- redirected = redirect_branch_edge (e, dest);
- gcc_assert (redirected);
- redirected->flags |= EDGE_FALLTHRU;
- df_set_bb_dirty (redirected->src);
- return redirected;
+ default:
+ break;
}
- /* In case we are redirecting fallthru edge to the branch edge
- of conditional jump, remove it. */
- if (EDGE_COUNT (src->succs) == 2)
- {
- /* Find the edge that is different from E. */
- edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
- if (s->dest == dest
- && any_condjump_p (BB_END (src))
- && onlyjump_p (BB_END (src)))
- delete_insn (BB_END (src));
- }
- if (dump_file)
- fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
- e->src->index, e->dest->index, dest->index);
- ret = redirect_edge_succ_nodup (e, dest);
+ break;
}
- else
- ret = redirect_branch_edge (e, dest);
- /* We don't want simplejumps in the insn stream during cfglayout. */
- gcc_assert (!simplejump_p (BB_END (src)));
+ /* It is possible to hit contradictory sequence. For instance:
- df_set_bb_dirty (src);
- return ret;
+ jump_insn
+ NOTE_INSN_BLOCK_BEG
+ barrier
+
+ Where barrier belongs to jump_insn, but the note does not. This can be
+ created by removing the basic block originally following
+ NOTE_INSN_BLOCK_BEG. In such case reorder the notes. */
+
+ for (insn = last_insn; insn != BB_END (bb); insn = prev)
+ {
+ prev = PREV_INSN (insn);
+ if (NOTE_P (insn))
+ switch (NOTE_KIND (insn))
+ {
+ case NOTE_INSN_BLOCK_END:
+ gcc_unreachable ();
+ break;
+ case NOTE_INSN_DELETED:
+ case NOTE_INSN_DELETED_LABEL:
+ case NOTE_INSN_DELETED_DEBUG_LABEL:
+ continue;
+ default:
+ reorder_insns (insn, insn, last_insn);
+ }
+ }
+
+ return last_insn;
}
-/* Simple wrapper as we always can redirect fallthru edges. */
-static basic_block
-cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
+/* Locate or create a label for a given basic block. */
+
+static rtx
+label_for_bb (basic_block bb)
{
- edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
+ rtx label = BB_HEAD (bb);
- gcc_assert (redirected);
- return NULL;
+ if (!LABEL_P (label))
+ {
+ if (dump_file)
+ fprintf (dump_file, "Emitting label for block %d\n", bb->index);
+
+ label = block_label (bb);
+ }
+
+ return label;
}
-/* Same as delete_basic_block but update cfg_layout structures. */
+/* Locate the effective beginning and end of the insn chain for each
+ block, as defined by skip_insns_after_block above. */
static void
-cfg_layout_delete_block (basic_block bb)
+record_effective_endpoints (void)
{
- rtx insn, next, prev = PREV_INSN (BB_HEAD (bb)), *to, remaints;
+ rtx next_insn;
+ basic_block bb;
+ rtx insn;
+
+ for (insn = get_insns ();
+ insn
+ && NOTE_P (insn)
+ && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
+ insn = NEXT_INSN (insn))
+ continue;
+ /* No basic blocks at all? */
+ gcc_assert (insn);
+
+ if (PREV_INSN (insn))
+ cfg_layout_function_header =
+ unlink_insn_chain (get_insns (), PREV_INSN (insn));
+ else
+ cfg_layout_function_header = NULL_RTX;
+
+ next_insn = get_insns ();
+ FOR_EACH_BB (bb)
+ {
+ rtx end;
+
+ if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
+ BB_HEADER (bb) = unlink_insn_chain (next_insn,
+ PREV_INSN (BB_HEAD (bb)));
+ end = skip_insns_after_block (bb);
+ if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
+ BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
+ next_insn = NEXT_INSN (BB_END (bb));
+ }
+
+ cfg_layout_function_footer = next_insn;
+ if (cfg_layout_function_footer)
+ cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
+}
+\f
+/* Pass execute function: put the current function into cfglayout
+   mode via cfg_layout_initialize.  Always returns 0 (no TODO flags). */
+
+static unsigned int
+into_cfg_layout_mode (void)
+{
+  cfg_layout_initialize (0);
+  return 0;
+}
+
+/* Pass execute function: leave cfglayout mode.  Record the current
+   block order in the bb->aux chain (each block points to its
+   successor in the chain) so cfg_layout_finalize relinks the insn
+   stream in that order.  Always returns 0 (no TODO flags). */
+
+static unsigned int
+outof_cfg_layout_mode (void)
+{
+  basic_block bb;
+
+  FOR_EACH_BB (bb)
+    if (bb->next_bb != EXIT_BLOCK_PTR)
+      bb->aux = bb->next_bb;
+
+  cfg_layout_finalize ();
+
+  return 0;
+}
+
+/* RTL pass descriptor: switch into cfglayout mode (provides
+   PROP_cfglayout; see into_cfg_layout_mode). */
+struct rtl_opt_pass pass_into_cfg_layout_mode =
+{
+ {
+  RTL_PASS,
+  "into_cfglayout", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  NULL, /* gate */
+  into_cfg_layout_mode, /* execute */
+  NULL, /* sub */
+  NULL, /* next */
+  0, /* static_pass_number */
+  TV_CFG, /* tv_id */
+  0, /* properties_required */
+  PROP_cfglayout, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  0 /* todo_flags_finish */
+ }
+};
+
+/* RTL pass descriptor: leave cfglayout mode (destroys
+   PROP_cfglayout; see outof_cfg_layout_mode). */
+struct rtl_opt_pass pass_outof_cfg_layout_mode =
+{
+ {
+  RTL_PASS,
+  "outof_cfglayout", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  NULL, /* gate */
+  outof_cfg_layout_mode, /* execute */
+  NULL, /* sub */
+  NULL, /* next */
+  0, /* static_pass_number */
+  TV_CFG, /* tv_id */
+  0, /* properties_required */
+  0, /* properties_provided */
+  PROP_cfglayout, /* properties_destroyed */
+  0, /* todo_flags_start */
+  0 /* todo_flags_finish */
+ }
+};
+\f
+
+/* Link the basic blocks in the correct order, compacting the basic
+   block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
+   function also clears the basic block header and footer fields.
+
+   This function is usually called after a pass (e.g. tracer) finishes
+   some transformations while in cfglayout mode. The required sequence
+   of the basic blocks is in a linked list along the bb->aux field.
+   This function re-links the basic block prev_bb and next_bb pointers
+   accordingly, and it compacts and renumbers the blocks.
+
+   FIXME: This currently works only for RTL, but the only RTL-specific
+   bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
+   to GIMPLE a long time ago, but it doesn't relink the basic block
+   chain. It could do that (to give better initial RTL) if this function
+   is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
+
+void
+relink_block_chain (bool stay_in_cfglayout_mode)
+{
+  basic_block bb, prev_bb;
+  int index;
+
+  /* Maybe dump the re-ordered sequence. */
+  if (dump_file)
+    {
+      fprintf (dump_file, "Reordered sequence:\n");
+      for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
+	   bb;
+	   bb = (basic_block) bb->aux, index++)
+	{
+	  fprintf (dump_file, " %i ", index);
+	  if (get_bb_original (bb))
+	    fprintf (dump_file, "duplicate of %i ",
+		     get_bb_original (bb)->index);
+	  else if (forwarder_block_p (bb)
+		   && !LABEL_P (BB_HEAD (bb)))
+	    fprintf (dump_file, "compensation ");
+	  else
+	    fprintf (dump_file, "bb %i ", bb->index);
+	  fprintf (dump_file, " [%i]\n", bb->frequency);
+	}
+    }
+
+  /* Now reorder the blocks. */
+  prev_bb = ENTRY_BLOCK_PTR;
+  bb = ENTRY_BLOCK_PTR->next_bb;
+  for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
+    {
+      bb->prev_bb = prev_bb;
+      prev_bb->next_bb = bb;
+    }
+  prev_bb->next_bb = EXIT_BLOCK_PTR;
+  EXIT_BLOCK_PTR->prev_bb = prev_bb;
+
+  /* Then, clean up the aux fields. */
+  FOR_ALL_BB (bb)
+    {
+      bb->aux = NULL;
+      if (!stay_in_cfglayout_mode)
+	BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
+    }
+
+  /* Maybe reset the original copy tables, they are not valid anymore
+     when we renumber the basic blocks in compact_blocks. If we are
+     going out of cfglayout mode, don't re-allocate the tables. */
+  free_original_copy_tables ();
+  if (stay_in_cfglayout_mode)
+    initialize_original_copy_tables ();
+
+  /* Finally, put basic_block_info in the new order. */
+  compact_blocks ();
+}
+\f
+
+/* Given a reorder chain, rearrange the code to match. */
+
+static void
+fixup_reorder_chain (void)
+{
+ basic_block bb;
+ rtx insn = NULL;
+
+ if (cfg_layout_function_header)
+ {
+ set_first_insn (cfg_layout_function_header);
+ insn = cfg_layout_function_header;
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ }
+
+ /* First do the bulk reordering -- rechain the blocks without regard to
+ the needed changes to jumps and labels. */
+
+ for (bb = ENTRY_BLOCK_PTR->next_bb; bb; bb = (basic_block) bb->aux)
+ {
+ if (BB_HEADER (bb))
+ {
+ if (insn)
+ NEXT_INSN (insn) = BB_HEADER (bb);
+ else
+ set_first_insn (BB_HEADER (bb));
+ PREV_INSN (BB_HEADER (bb)) = insn;
+ insn = BB_HEADER (bb);
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ }
+ if (insn)
+ NEXT_INSN (insn) = BB_HEAD (bb);
+ else
+ set_first_insn (BB_HEAD (bb));
+ PREV_INSN (BB_HEAD (bb)) = insn;
+ insn = BB_END (bb);
+ if (BB_FOOTER (bb))
+ {
+ NEXT_INSN (insn) = BB_FOOTER (bb);
+ PREV_INSN (BB_FOOTER (bb)) = insn;
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ }
+ }
+
+ NEXT_INSN (insn) = cfg_layout_function_footer;
+ if (cfg_layout_function_footer)
+ PREV_INSN (cfg_layout_function_footer) = insn;
+
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+
+ set_last_insn (insn);
+#ifdef ENABLE_CHECKING
+ verify_insn_chain ();
+#endif
+
+ /* Now add jumps and labels as needed to match the blocks new
+ outgoing edges. */
+
+ for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = (basic_block) bb->aux)
+ {
+ edge e_fall, e_taken, e;
+ rtx bb_end_insn;
+ rtx ret_label = NULL_RTX;
+ basic_block nb, src_bb;
+ edge_iterator ei;
+
+ if (EDGE_COUNT (bb->succs) == 0)
+ continue;
+
+ /* Find the old fallthru edge, and another non-EH edge for
+ a taken jump. */
+ e_taken = e_fall = NULL;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->flags & EDGE_FALLTHRU)
+ e_fall = e;
+ else if (! (e->flags & EDGE_EH))
+ e_taken = e;
+
+ bb_end_insn = BB_END (bb);
+ if (JUMP_P (bb_end_insn))
+ {
+ ret_label = JUMP_LABEL (bb_end_insn);
+ if (any_condjump_p (bb_end_insn))
+ {
+ /* This might happen if the conditional jump has side
+ effects and could therefore not be optimized away.
+ Make the basic block to end with a barrier in order
+ to prevent rtl_verify_flow_info from complaining. */
+ if (!e_fall)
+ {
+ gcc_assert (!onlyjump_p (bb_end_insn)
+ || returnjump_p (bb_end_insn));
+ BB_FOOTER (bb) = emit_barrier_after (bb_end_insn);
+ continue;
+ }
+
+ /* If the old fallthru is still next, nothing to do. */
+ if (bb->aux == e_fall->dest
+ || e_fall->dest == EXIT_BLOCK_PTR)
+ continue;
+
+ /* The degenerated case of conditional jump jumping to the next
+ instruction can happen for jumps with side effects. We need
+ to construct a forwarder block and this will be done just
+ fine by force_nonfallthru below. */
+ if (!e_taken)
+ ;
+
+ /* There is another special case: if *neither* block is next,
+ such as happens at the very end of a function, then we'll
+ need to add a new unconditional jump. Choose the taken
+ edge based on known or assumed probability. */
+ else if (bb->aux != e_taken->dest)
+ {
+ rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);
+
+ if (note
+ && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2
+ && invert_jump (bb_end_insn,
+ (e_fall->dest == EXIT_BLOCK_PTR
+ ? NULL_RTX
+ : label_for_bb (e_fall->dest)), 0))
+ {
+ e_fall->flags &= ~EDGE_FALLTHRU;
+ gcc_checking_assert (could_fall_through
+ (e_taken->src, e_taken->dest));
+ e_taken->flags |= EDGE_FALLTHRU;
+ update_br_prob_note (bb);
+ e = e_fall, e_fall = e_taken, e_taken = e;
+ }
+ }
+
+ /* If the "jumping" edge is a crossing edge, and the fall
+ through edge is non-crossing, leave things as they are. */
+ else if ((e_taken->flags & EDGE_CROSSING)
+ && !(e_fall->flags & EDGE_CROSSING))
+ continue;
+
+ /* Otherwise we can try to invert the jump. This will
+ basically never fail, however, keep up the pretense. */
+ else if (invert_jump (bb_end_insn,
+ (e_fall->dest == EXIT_BLOCK_PTR
+ ? NULL_RTX
+ : label_for_bb (e_fall->dest)), 0))
+ {
+ e_fall->flags &= ~EDGE_FALLTHRU;
+ gcc_checking_assert (could_fall_through
+ (e_taken->src, e_taken->dest));
+ e_taken->flags |= EDGE_FALLTHRU;
+ update_br_prob_note (bb);
+ if (LABEL_NUSES (ret_label) == 0
+ && single_pred_p (e_taken->dest))
+ delete_insn (ret_label);
+ continue;
+ }
+ }
+ else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
+ {
+ /* If the old fallthru is still next or if
+ asm goto doesn't have a fallthru (e.g. when followed by
+ __builtin_unreachable ()), nothing to do. */
+ if (! e_fall
+ || bb->aux == e_fall->dest
+ || e_fall->dest == EXIT_BLOCK_PTR)
+ continue;
+
+ /* Otherwise we'll have to use the fallthru fixup below. */
+ }
+ else
+ {
+ /* Otherwise we have some return, switch or computed
+ jump. In the 99% case, there should not have been a
+ fallthru edge. */
+ gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
+ continue;
+ }
+ }
+ else
+ {
+ /* No fallthru implies a noreturn function with EH edges, or
+ something similarly bizarre. In any case, we don't need to
+ do anything. */
+ if (! e_fall)
+ continue;
+
+ /* If the fallthru block is still next, nothing to do. */
+ if (bb->aux == e_fall->dest)
+ continue;
+
+ /* A fallthru to exit block. */
+ if (e_fall->dest == EXIT_BLOCK_PTR)
+ continue;
+ }
+
+ /* We got here if we need to add a new jump insn.
+ Note force_nonfallthru can delete E_FALL and thus we have to
+ save E_FALL->src prior to the call to force_nonfallthru. */
+ src_bb = e_fall->src;
+ nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
+ if (nb)
+ {
+ nb->aux = bb->aux;
+ bb->aux = nb;
+ /* Don't process this new block. */
+ bb = nb;
+
+ /* Make sure new bb is tagged for correct section (same as
+ fall-thru source, since you cannot fall-thru across
+ section boundaries). */
+ BB_COPY_PARTITION (src_bb, single_pred (bb));
+ if (flag_reorder_blocks_and_partition
+ && targetm_common.have_named_sections
+ && JUMP_P (BB_END (bb))
+ && !any_condjump_p (BB_END (bb))
+ && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
+ add_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX);
+ }
+ }
+
+ relink_block_chain (/*stay_in_cfglayout_mode=*/false);
+
+ /* Annoying special case - jump around dead jumptables left in the code. */
+ FOR_EACH_BB (bb)
+ {
+ edge e = find_fallthru_edge (bb->succs);
+
+ if (e && !can_fallthru (e->src, e->dest))
+ force_nonfallthru (e);
+ }
+
+ /* Ensure goto_locus from edges has some instructions with that locus
+ in RTL. */
+ if (!optimize)
+ FOR_EACH_BB (bb)
+ {
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
+ && !(e->flags & EDGE_ABNORMAL))
+ {
+ edge e2;
+ edge_iterator ei2;
+ basic_block dest, nb;
+ rtx end;
+
+ insn = BB_END (e->src);
+ end = PREV_INSN (BB_HEAD (e->src));
+ while (insn != end
+ && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
+ insn = PREV_INSN (insn);
+ if (insn != end
+ && INSN_LOCATION (insn) == e->goto_locus)
+ continue;
+ if (simplejump_p (BB_END (e->src))
+ && !INSN_HAS_LOCATION (BB_END (e->src)))
+ {
+ INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
+ continue;
+ }
+ dest = e->dest;
+ if (dest == EXIT_BLOCK_PTR)
+ {
+ /* Non-fallthru edges to the exit block cannot be split. */
+ if (!(e->flags & EDGE_FALLTHRU))
+ continue;
+ }
+ else
+ {
+ insn = BB_HEAD (dest);
+ end = NEXT_INSN (BB_END (dest));
+ while (insn != end && !NONDEBUG_INSN_P (insn))
+ insn = NEXT_INSN (insn);
+ if (insn != end && INSN_HAS_LOCATION (insn)
+ && INSN_LOCATION (insn) == e->goto_locus)
+ continue;
+ }
+ nb = split_edge (e);
+ if (!INSN_P (BB_END (nb)))
+ BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
+ nb);
+ INSN_LOCATION (BB_END (nb)) = e->goto_locus;
+
+ /* If there are other incoming edges to the destination block
+ with the same goto locus, redirect them to the new block as
+ well, this can prevent other such blocks from being created
+ in subsequent iterations of the loop. */
+ for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
+ if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
+ && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
+ && e->goto_locus == e2->goto_locus)
+ redirect_edge_and_branch (e2, nb);
+ else
+ ei_next (&ei2);
+ }
+ }
+}
+\f
+/* Perform sanity checks on the insn chain.
+ 1. Check that next/prev pointers are consistent in both the forward and
+ reverse direction.
+ 2. Count insns in chain, going both directions, and check if equal.
+ 3. Check that get_last_insn () returns the actual end of chain. */
+
+DEBUG_FUNCTION void
+verify_insn_chain (void)
+{
+  rtx insn, prev, next;
+  int forward_count = 1, backward_count = 1;
+
+  /* Walk forward, checking each insn's PREV pointer against the insn
+     we just came from.  */
+  prev = NULL;
+  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+    {
+      gcc_assert (PREV_INSN (insn) == prev);
+      prev = insn;
+      forward_count++;
+    }
+
+  /* The last insn reached going forward must be what get_last_insn ()
+     reports.  */
+  gcc_assert (prev == get_last_insn ());
+
+  /* Walk backward, checking each insn's NEXT pointer symmetrically.  */
+  next = NULL;
+  for (insn = get_last_insn (); insn != 0; insn = PREV_INSN (insn))
+    {
+      gcc_assert (NEXT_INSN (insn) == next);
+      next = insn;
+      backward_count++;
+    }
+
+  /* Both traversals must have seen the same number of insns.  */
+  gcc_assert (forward_count == backward_count);
+}
+\f
+/* If we have assembler epilogues, the block falling through to exit must
+ be the last one in the reordered chain when we reach final. Ensure
+ that this condition is met. */
+static void
+fixup_fallthru_exit_predecessor (void)
+{
+  edge e;
+  basic_block bb = NULL;
+
+  /* This transformation is not valid before reload, because we might
+     separate a call from the instruction that copies the return
+     value.  */
+  gcc_assert (reload_completed);
+
+  /* Find the (at most one) fallthru predecessor of the exit block.  */
+  e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
+  if (e)
+    bb = e->src;
+
+  /* The bb->aux pointers link the blocks in their intended output
+     order.  If BB is not already last in that chain, move it there.  */
+  if (bb && bb->aux)
+    {
+      basic_block c = ENTRY_BLOCK_PTR->next_bb;
+
+      /* If the very first block is the one with the fall-through exit
+	 edge, we have to split that block.  */
+      if (c == bb)
+	{
+	  bb = split_block (bb, NULL)->dest;
+	  bb->aux = c->aux;
+	  c->aux = bb;
+	  /* The trailing footer insns (barriers etc.) belong with the
+	     new tail block.  */
+	  BB_FOOTER (bb) = BB_FOOTER (c);
+	  BB_FOOTER (c) = NULL;
+	}
+
+      /* Find BB's predecessor in the aux chain and unlink BB ...  */
+      while (c->aux != bb)
+	c = (basic_block) c->aux;
+
+      c->aux = bb->aux;
+      /* ... then walk to the current end of the chain ...  */
+      while (c->aux)
+	c = (basic_block) c->aux;
+
+      /* ... and relink BB as the final block.  */
+      c->aux = bb;
+      bb->aux = NULL;
+    }
+}
+
+/* In case there are more than one fallthru predecessors of exit, force that
+ there is only one. */
+
+static void
+force_one_exit_fallthru (void)
+{
+  edge e, predecessor = NULL;
+  bool more = false;
+  edge_iterator ei;
+  basic_block forwarder, bb;
+
+  /* Remember the first fallthru predecessor of the exit block and
+     detect whether a second one exists.  */
+  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
+    if (e->flags & EDGE_FALLTHRU)
+      {
+	if (predecessor == NULL)
+	  predecessor = e;
+	else
+	  {
+	    more = true;
+	    break;
+	  }
+      }
+
+  /* Zero or one fallthru predecessor: nothing to do.  */
+  if (!more)
+    return;
+
+  /* Exit has several fallthru predecessors.  Create a forwarder block for
+     them.  */
+  forwarder = split_edge (predecessor);
+  /* Send every remaining fallthru edge through the forwarder.  The
+     iterator is advanced manually because redirection mutates the
+     predecessor list.  */
+  for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
+    {
+      if (e->src == forwarder
+	  || !(e->flags & EDGE_FALLTHRU))
+	ei_next (&ei);
+      else
+	redirect_edge_and_branch_force (e, forwarder);
+    }
+
+  /* Fix up the chain of blocks -- make FORWARDER immediately precede the
+     exit block.  */
+  FOR_EACH_BB (bb)
+    {
+      if (bb->aux == NULL && bb != forwarder)
+	{
+	  bb->aux = forwarder;
+	  break;
+	}
+    }
+}
+\f
+/* Return true in case it is possible to duplicate the basic block BB. */
+
+static bool
+cfg_layout_can_duplicate_bb_p (const_basic_block bb)
+{
+  /* Do not attempt to duplicate tablejumps, as we need to unshare
+     the dispatch table.  This is difficult to do, as the instructions
+     computing jump destination may be hoisted outside the basic block.  */
+  if (tablejump_p (BB_END (bb), NULL, NULL))
+    return false;
+
+  /* Do not duplicate blocks containing insns that can't be copied.  */
+  if (targetm.cannot_copy_insn_p)
+    {
+      rtx insn;
+
+      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
+	{
+	  if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
+	    return false;
+	  if (insn == BB_END (bb))
+	    break;
+	}
+    }
+
+  return true;
+}
+
+/* Duplicate the insns between FROM and TO, inclusive, appending the
+   copies at the end of the current insn chain, and return the first
+   insn of the copy.  The copied chain is relinked into place by the
+   caller.  Dispatch tables (with their trailing barriers), code labels
+   and most notes are intentionally not duplicated.  */
+rtx
+duplicate_insn_chain (rtx from, rtx to)
+{
+  rtx insn, last, copy;
+
+  /* Avoid updating of boundaries of previous basic block.  The
+     note will get removed from insn stream in fixup.  */
+  last = emit_note (NOTE_INSN_DELETED);
+
+  /* Create copy at the end of INSN chain.  The chain will
+     be reordered later.  */
+  for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
+    {
+      switch (GET_CODE (insn))
+	{
+	case DEBUG_INSN:
+	  /* Don't duplicate label debug insns.  */
+	  if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
+	    break;
+	  /* FALLTHRU */
+	case INSN:
+	case CALL_INSN:
+	case JUMP_INSN:
+	  /* Avoid copying of dispatch tables.  We never duplicate
+	     tablejumps, so this can hit only in case the table got
+	     moved far from original jump.  */
+	  if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+	      || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+	    {
+	      /* Avoid copying following barrier as well if any
+		 (and debug insns in between).  */
+	      rtx next;
+
+	      for (next = NEXT_INSN (insn);
+		   next != NEXT_INSN (to);
+		   next = NEXT_INSN (next))
+		if (!DEBUG_INSN_P (next))
+		  break;
+	      if (next != NEXT_INSN (to) && BARRIER_P (next))
+		insn = next;
+	      break;
+	    }
+	  copy = emit_copy_of_insn_after (insn, get_last_insn ());
+	  /* Preserve (simple) return jump labels, which are not real
+	     code labels and so are not handled by the emit machinery.  */
+	  if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
+	      && ANY_RETURN_P (JUMP_LABEL (insn)))
+	    JUMP_LABEL (copy) = JUMP_LABEL (insn);
+	  maybe_copy_prologue_epilogue_insn (insn, copy);
+	  break;
+
+	case CODE_LABEL:
+	  break;
+
+	case BARRIER:
+	  emit_barrier ();
+	  break;
+
+	case NOTE:
+	  switch (NOTE_KIND (insn))
+	    {
+	      /* In case prologue is empty and function contain label
+		 in first BB, we may want to copy the block.  */
+	    case NOTE_INSN_PROLOGUE_END:
+
+	    case NOTE_INSN_DELETED:
+	    case NOTE_INSN_DELETED_LABEL:
+	    case NOTE_INSN_DELETED_DEBUG_LABEL:
+	      /* No problem to strip these.  */
+	    case NOTE_INSN_FUNCTION_BEG:
+	      /* There is always just single entry to function.  */
+	    case NOTE_INSN_BASIC_BLOCK:
+	      break;
+
+	    case NOTE_INSN_EPILOGUE_BEG:
+	    case NOTE_INSN_SWITCH_TEXT_SECTIONS:
+	      emit_note_copy (insn);
+	      break;
+
+	    default:
+	      /* All other notes should have already been eliminated.  */
+	      gcc_unreachable ();
+	    }
+	  break;
+	default:
+	  gcc_unreachable ();
+	}
+    }
+  /* The copy starts right after the scratch note; drop the note.  */
+  insn = NEXT_INSN (last);
+  delete_insn (last);
+  return insn;
+}
+
+/* Create a duplicate of the basic block BB. */
+
+static basic_block
+cfg_layout_duplicate_bb (basic_block bb)
+{
+  rtx insn;
+  basic_block new_bb;
+
+  /* Copy BB's insns to the end of the insn chain and wrap a new basic
+     block around the copy, placed just before the exit block.  */
+  insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
+  new_bb = create_basic_block (insn,
+			       insn ? get_last_insn () : NULL,
+			       EXIT_BLOCK_PTR->prev_bb);
+
+  BB_COPY_PARTITION (new_bb, bb);
+  /* Duplicate the header (notes kept in front of the block while in
+     cfglayout mode), if any: the copy is emitted at the end of the
+     chain and then unlinked so it can hang off the new block.  */
+  if (BB_HEADER (bb))
+    {
+      insn = BB_HEADER (bb);
+      while (NEXT_INSN (insn))
+	insn = NEXT_INSN (insn);
+      insn = duplicate_insn_chain (BB_HEADER (bb), insn);
+      if (insn)
+	BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
+    }
+
+  /* Likewise for the footer (barriers etc. kept after the block).  */
+  if (BB_FOOTER (bb))
+    {
+      insn = BB_FOOTER (bb);
+      while (NEXT_INSN (insn))
+	insn = NEXT_INSN (insn);
+      insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
+      if (insn)
+	BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
+    }
+
+  return new_bb;
+}
+
+\f
+/* Main entry point to this module - initialize the datastructures for
+ CFG layout changes. It keeps LOOPS up-to-date if not null.
+
+ FLAGS is a set of additional flags to pass to cleanup_cfg(). */
+
+void
+cfg_layout_initialize (unsigned int flags)
+{
+  rtx x;
+  basic_block bb;
+
+  initialize_original_copy_tables ();
+
+  /* Switch the cfg hooks to their cfglayout-mode variants.  */
+  cfg_layout_rtl_register_cfg_hooks ();
+
+  /* Stash per-block header/footer insns and the function's leading and
+     trailing notes so the insn chain can be freely reordered.  */
+  record_effective_endpoints ();
+
+  /* Make sure that the targets of non local gotos are marked.  */
+  for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1))
+    {
+      bb = BLOCK_FOR_INSN (XEXP (x, 0));
+      bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
+    }
+
+  cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
+}
+
+/* Splits superblocks. */
+/* Clear the BB_SUPERBLOCK flag from every block and, if any block
+   carried it, rediscover the sub-basic-blocks hidden inside the former
+   superblocks.  */
+void
+break_superblocks (void)
+{
+  sbitmap superblocks;
+  bool need = false;
+  basic_block bb;
+
+  superblocks = sbitmap_alloc (last_basic_block);
+  bitmap_clear (superblocks);
+
+  FOR_EACH_BB (bb)
+    if (bb->flags & BB_SUPERBLOCK)
+      {
+	bb->flags &= ~BB_SUPERBLOCK;
+	bitmap_set_bit (superblocks, bb->index);
+	need = true;
+      }
+
+  if (need)
+    {
+      rebuild_jump_labels (get_insns ());
+      find_many_sub_basic_blocks (superblocks);
+    }
+
+  /* Release with the deallocator matching sbitmap_alloc rather than a
+     bare free, so any auxiliary storage (e.g. a popcount array) is
+     freed as well.  */
+  sbitmap_free (superblocks);
+}
+
+/* Finalize the changes: reorder insn list according to the sequence specified
+ by aux pointers, enter compensation code, rebuild scope forest. */
+
+void
+cfg_layout_finalize (void)
+{
+#ifdef ENABLE_CHECKING
+  verify_flow_info ();
+#endif
+  /* Guarantee the exit block has a single fallthru predecessor.  */
+  force_one_exit_fallthru ();
+  /* Leave cfglayout mode: switch back to the plain RTL cfg hooks.  */
+  rtl_register_cfg_hooks ();
+  /* When epilogues are emitted by hand (no epilogue pattern), the block
+     falling through to exit must end up last in the chain.  */
+  if (reload_completed
+#ifdef HAVE_epilogue
+      && !HAVE_epilogue
+#endif
+      )
+    fixup_fallthru_exit_predecessor ();
+  /* Physically reorder the insn chain to follow the bb->aux links and
+     emit any required compensation jumps.  */
+  fixup_reorder_chain ();
+
+  rebuild_jump_labels (get_insns ());
+  delete_dead_jumptables ();
+
+#ifdef ENABLE_CHECKING
+  verify_insn_chain ();
+  verify_flow_info ();
+#endif
+}
+
+
+/* Same as split_block but update cfg_layout structures. */
+
+static basic_block
+cfg_layout_split_block (basic_block bb, void *insnp)
+{
+  basic_block new_bb = rtl_split_block (bb, (rtx) insnp);
+
+  /* The footer insns (barriers etc.) move with the tail half.  */
+  BB_FOOTER (new_bb) = BB_FOOTER (bb);
+  BB_FOOTER (bb) = NULL;
+
+  return new_bb;
+}
+
+/* Redirect Edge to DEST. */
+static edge
+cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
+{
+  basic_block src = e->src;
+  edge ret;
+
+  /* Abnormal call and EH edges cannot be redirected.  */
+  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
+    return NULL;
+
+  /* Already going to DEST; nothing to do.  */
+  if (e->dest == dest)
+    return e;
+
+  /* First try replacing the jump at the end of SRC outright.  */
+  if (e->src != ENTRY_BLOCK_PTR
+      && (ret = try_redirect_by_replacing_jump (e, dest, true)))
+    {
+      df_set_bb_dirty (src);
+      return ret;
+    }
+
+  /* The entry fallthru edge can simply be re-pointed.  */
+  if (e->src == ENTRY_BLOCK_PTR
+      && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
+    {
+      if (dump_file)
+	fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
+		 e->src->index, dest->index);
+
+      df_set_bb_dirty (e->src);
+      redirect_edge_succ (e, dest);
+      return e;
+    }
+
+  /* Redirect_edge_and_branch may decide to turn branch into fallthru edge
+     in the case the basic block appears to be in sequence.  Avoid this
+     transformation.  */
+
+  if (e->flags & EDGE_FALLTHRU)
+    {
+      /* Redirect any branch edges unified with the fallthru one.  */
+      if (JUMP_P (BB_END (src))
+	  && label_is_jump_target_p (BB_HEAD (e->dest),
+				     BB_END (src)))
+	{
+	  edge redirected;
+
+	  if (dump_file)
+	    fprintf (dump_file, "Fallthru edge unified with branch "
+		     "%i->%i redirected to %i\n",
+		     e->src->index, e->dest->index, dest->index);
+	  e->flags &= ~EDGE_FALLTHRU;
+	  redirected = redirect_branch_edge (e, dest);
+	  gcc_assert (redirected);
+	  redirected->flags |= EDGE_FALLTHRU;
+	  df_set_bb_dirty (redirected->src);
+	  return redirected;
+	}
+      /* In case we are redirecting fallthru edge to the branch edge
+	 of conditional jump, remove it.  */
+      if (EDGE_COUNT (src->succs) == 2)
+	{
+	  /* Find the edge that is different from E.  */
+	  edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
+
+	  /* Both edges would now reach DEST, so the conditional jump
+	     is redundant -- but only delete it if that is all the last
+	     insn does.  */
+	  if (s->dest == dest
+	      && any_condjump_p (BB_END (src))
+	      && onlyjump_p (BB_END (src)))
+	    delete_insn (BB_END (src));
+	}
+      if (dump_file)
+	fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
+		 e->src->index, e->dest->index, dest->index);
+      ret = redirect_edge_succ_nodup (e, dest);
+    }
+  else
+    ret = redirect_branch_edge (e, dest);
+
+  /* We don't want simplejumps in the insn stream during cfglayout.  */
+  gcc_assert (!simplejump_p (BB_END (src)));
+
+  df_set_bb_dirty (src);
+  return ret;
+}
+
+/* Simple wrapper as we always can redirect fallthru edges. */
+static basic_block
+cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
+{
+  /* Keep the call outside gcc_assert: it must run even when assertion
+     checking is disabled.  */
+  edge result = cfg_layout_redirect_edge_and_branch (e, dest);
+
+  /* In cfglayout mode the redirection cannot fail.  */
+  gcc_assert (result);
+  /* This hook never creates a new basic block.  */
+  return NULL;
+}
+
+/* Same as delete_basic_block but update cfg_layout structures. */
+
+static void
+cfg_layout_delete_block (basic_block bb)
+{
+ rtx insn, next, prev = PREV_INSN (BB_HEAD (bb)), *to, remaints;
+
+ if (BB_HEADER (bb))
{
next = BB_HEAD (bb);
if (prev)
- NEXT_INSN (prev) = bb->il.rtl->header;
+ NEXT_INSN (prev) = BB_HEADER (bb);
else
- set_first_insn (bb->il.rtl->header);
- PREV_INSN (bb->il.rtl->header) = prev;
- insn = bb->il.rtl->header;
+ set_first_insn (BB_HEADER (bb));
+ PREV_INSN (BB_HEADER (bb)) = prev;
+ insn = BB_HEADER (bb);
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
PREV_INSN (next) = insn;
}
next = NEXT_INSN (BB_END (bb));
- if (bb->il.rtl->footer)
+ if (BB_FOOTER (bb))
{
- insn = bb->il.rtl->footer;
+ insn = BB_FOOTER (bb);
while (insn)
{
if (BARRIER_P (insn))
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
- bb->il.rtl->footer = NEXT_INSN (insn);
+ BB_FOOTER (bb) = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
break;
insn = NEXT_INSN (insn);
}
- if (bb->il.rtl->footer)
+ if (BB_FOOTER (bb))
{
insn = BB_END (bb);
- NEXT_INSN (insn) = bb->il.rtl->footer;
- PREV_INSN (bb->il.rtl->footer) = insn;
+ NEXT_INSN (insn) = BB_FOOTER (bb);
+ PREV_INSN (BB_FOOTER (bb)) = insn;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
}
}
if (bb->next_bb != EXIT_BLOCK_PTR)
- to = &bb->next_bb->il.rtl->header;
+ to = &BB_HEADER (bb->next_bb);
else
to = &cfg_layout_function_footer;
if (BB_PARTITION (a) != BB_PARTITION (b))
return false;
+ /* Protect the loop latches. */
+ if (current_loops && b->loop_father->latch == b)
+ return false;
+
/* If we would end up moving B's instructions, make sure it doesn't fall
through into the exit block, since we cannot recover from a fallthrough
edge into the exit block occurring in the middle of a function. */
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
+ rtx insn;
gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
gcc_assert (!JUMP_P (BB_END (a)));
- /* When not optimizing and the edge is the only place in RTL which holds
+ /* When not optimizing CFG and the edge is the only place in RTL which holds
some unique locus, emit a nop with that locus in between. */
- if (!optimize && EDGE_SUCC (a, 0)->goto_locus)
- {
- rtx insn = BB_END (a), end = PREV_INSN (BB_HEAD (a));
- int goto_locus = EDGE_SUCC (a, 0)->goto_locus;
-
- while (insn != end && (!INSN_P (insn) || INSN_LOCATOR (insn) == 0))
- insn = PREV_INSN (insn);
- if (insn != end && locator_eq (INSN_LOCATOR (insn), goto_locus))
- goto_locus = 0;
- else
- {
- insn = BB_HEAD (b);
- end = NEXT_INSN (BB_END (b));
- while (insn != end && !INSN_P (insn))
- insn = NEXT_INSN (insn);
- if (insn != end && INSN_LOCATOR (insn) != 0
- && locator_eq (INSN_LOCATOR (insn), goto_locus))
- goto_locus = 0;
- }
- if (goto_locus)
- {
- BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
- INSN_LOCATOR (BB_END (a)) = goto_locus;
- }
- }
+ if (!optimize)
+ emit_nop_for_unique_locus_between (a, b);
/* Possible line number notes should appear in between. */
- if (b->il.rtl->header)
+ if (BB_HEADER (b))
{
rtx first = BB_END (a), last;
- last = emit_insn_after_noloc (b->il.rtl->header, BB_END (a), a);
+ last = emit_insn_after_noloc (BB_HEADER (b), BB_END (a), a);
/* The above might add a BARRIER as BB_END, but as barriers
aren't valid parts of a bb, remove_insn doesn't update
BB_END if it is a barrier. So adjust BB_END here. */
while (BB_END (a) != first && BARRIER_P (BB_END (a)))
BB_END (a) = PREV_INSN (BB_END (a));
delete_insn_chain (NEXT_INSN (first), last, false);
- b->il.rtl->header = NULL;
+ BB_HEADER (b) = NULL;
}
/* In the case basic blocks are not adjacent, move them around. */
if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
{
- rtx first = unlink_insn_chain (BB_HEAD (b), BB_END (b));
-
- emit_insn_after_noloc (first, BB_END (a), a);
- /* Skip possible DELETED_LABEL insn. */
- if (!NOTE_INSN_BASIC_BLOCK_P (first))
- first = NEXT_INSN (first);
- gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first));
- BB_HEAD (b) = NULL;
+ insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
- /* emit_insn_after_noloc doesn't call df_insn_change_bb.
- We need to explicitly call. */
- update_bb_for_insn_chain (NEXT_INSN (first),
- BB_END (b),
- a);
-
- delete_insn (first);
+ emit_insn_after_noloc (insn, BB_END (a), a);
}
/* Otherwise just re-associate the instructions. */
else
{
- rtx insn;
-
- update_bb_for_insn_chain (BB_HEAD (b), BB_END (b), a);
-
insn = BB_HEAD (b);
- /* Skip possible DELETED_LABEL insn. */
- if (!NOTE_INSN_BASIC_BLOCK_P (insn))
- insn = NEXT_INSN (insn);
- gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
- BB_HEAD (b) = NULL;
BB_END (a) = BB_END (b);
- delete_insn (insn);
}
+ /* emit_insn_after_noloc doesn't call df_insn_change_bb.
+ We need to explicitly call. */
+ update_bb_for_insn_chain (insn, BB_END (b), a);
+
+ /* Skip possible DELETED_LABEL insn. */
+ if (!NOTE_INSN_BASIC_BLOCK_P (insn))
+ insn = NEXT_INSN (insn);
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
+ BB_HEAD (b) = NULL;
+ delete_insn (insn);
+
df_bb_delete (b->index);
/* Possible tablejumps and barriers should appear after the block. */
- if (b->il.rtl->footer)
+ if (BB_FOOTER (b))
{
- if (!a->il.rtl->footer)
- a->il.rtl->footer = b->il.rtl->footer;
+ if (!BB_FOOTER (a))
+ BB_FOOTER (a) = BB_FOOTER (b);
else
{
- rtx last = a->il.rtl->footer;
+ rtx last = BB_FOOTER (a);
while (NEXT_INSN (last))
last = NEXT_INSN (last);
- NEXT_INSN (last) = b->il.rtl->footer;
- PREV_INSN (b->il.rtl->footer) = last;
+ NEXT_INSN (last) = BB_FOOTER (b);
+ PREV_INSN (BB_FOOTER (b)) = last;
}
- b->il.rtl->footer = NULL;
+ BB_FOOTER (b) = NULL;
}
/* If B was a forwarder block, propagate the locus on the edge. */
-  if (forwarder_p && !EDGE_SUCC (b, 0)->goto_locus)
+  if (forwarder_p
+      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
if (dump_file)
{
}
+/* Return true if BB contains only labels or non-executable
+ instructions. */
+
+static bool
+rtl_block_empty_p (basic_block bb)
+{
+  rtx insn;
+
+  /* The artificial entry and exit blocks never hold real code.  */
+  if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
+    return true;
+
+  /* Any executable insn other than an unconditional jump makes the
+     block non-empty.  */
+  FOR_BB_INSNS (bb, insn)
+    {
+      if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
+	return false;
+    }
+
+  return true;
+}
+
+/* Split a basic block if it ends with a conditional branch and if
+ the other part of the block is not empty. */
+
+static basic_block
+rtl_split_block_before_cond_jump (basic_block bb)
+{
+  rtx insn;
+  rtx split_point = NULL;  /* Insn just before the last condjump seen.  */
+  rtx last = NULL;         /* Previous insn in the walk.  */
+  bool found_code = false; /* Whether BB has any non-jump executable insn.  */
+
+  FOR_BB_INSNS (bb, insn)
+    {
+      if (any_condjump_p (insn))
+	split_point = last;
+      else if (NONDEBUG_INSN_P (insn))
+	found_code = true;
+      last = insn;
+    }
+
+  /* Only split when there is both real code and a conditional jump to
+     separate it from.  */
+  if (found_code && split_point)
+    return split_block (bb, split_point)->dest;
+  else
+    return NULL;
+}
+
/* Return 1 if BB ends with a call, possibly followed by some
instructions that must stay with the call, 0 otherwise. */
if (! blocks)
check_last_block = true;
else
- check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+ check_last_block = bitmap_bit_p (blocks, EXIT_BLOCK_PTR->prev_bb->index);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
if (!bb)
continue;
- if (blocks && !TEST_BIT (blocks, i))
+ if (blocks && !bitmap_bit_p (blocks, i))
continue;
for (insn = BB_END (bb); ; insn = prev_insn)
void
init_rtl_bb_info (basic_block bb)
{
-  gcc_assert (!bb->il.rtl);
-  bb->il.rtl = ggc_alloc_cleared_rtl_bb_info ();
+  gcc_assert (!bb->il.x.rtl);
+  /* The cached head insn must start out empty before fresh RTL info
+     is attached.  */
+  bb->il.x.head_ = NULL;
+  bb->il.x.rtl = ggc_alloc_cleared_rtl_bb_info ();
}
/* Returns true if it is possible to remove edge E by redirecting
return true;
}
-/* We do not want to declare these functions in a header file, since they
- should only be used through the cfghooks interface, and we do not want to
- move them here since it would require also moving quite a lot of related
- code. They are in cfglayout.c. */
-extern bool cfg_layout_can_duplicate_bb_p (const_basic_block);
-extern basic_block cfg_layout_duplicate_bb (basic_block);
-
static basic_block
rtl_duplicate_bb (basic_block bb)
{
return bb;
}
+/* Do book-keeping of basic block BB for the profile consistency checker.
+ If AFTER_PASS is 0, do pre-pass accounting, or if AFTER_PASS is 1
+ then do post-pass accounting. Store the counting in RECORD. */
+static void
+rtl_account_profile_record (basic_block bb, int after_pass,
+			    struct profile_record *record)
+{
+  rtx insn;
+
+  FOR_BB_INSNS (bb, insn)
+    {
+      if (!INSN_P (insn))
+	continue;
+
+      /* Size is accumulated unconditionally.  */
+      record->size[after_pass] += insn_rtx_cost (PATTERN (insn), false);
+
+      /* Time is weighted by whichever execution estimate the current
+	 profile provides.  */
+      if (profile_status == PROFILE_READ)
+	record->time[after_pass]
+	  += insn_rtx_cost (PATTERN (insn), true) * bb->count;
+      else if (profile_status == PROFILE_GUESSED)
+	record->time[after_pass]
+	  += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
+    }
+}
+
/* Implementation of CFG manipulation for linearized RTL. */
struct cfg_hooks rtl_cfg_hooks = {
"rtl",
NULL, /* lv_add_condition_to_bb */
NULL, /* lv_adjust_loop_header_phi*/
NULL, /* extract_cond_bb_edges */
- NULL /* flush_pending_stmts */
+ NULL, /* flush_pending_stmts */
+ rtl_block_empty_p, /* block_empty_p */
+ rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
+ rtl_account_profile_record,
};
/* Implementation of CFG manipulation for cfg layout RTL, where
rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
NULL, /* lv_adjust_loop_header_phi*/
rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
- NULL /* flush_pending_stmts */
+ NULL, /* flush_pending_stmts */
+ rtl_block_empty_p, /* block_empty_p */
+ rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
+ rtl_account_profile_record,
};
+
+#include "gt-cfgrtl.h"