#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "tm_p.h"
#include "target.h"
-#include "obstack.h"
-
/* cleanup_cfg maintains the following flags for each basic block. */
enum bb_flags
rtx *, rtx *));
static bool insns_match_p PARAMS ((int, rtx, rtx));
-static bool delete_unreachable_blocks PARAMS ((void));
static bool label_is_jump_target_p PARAMS ((rtx, rtx));
static bool tail_recursion_label_p PARAMS ((rtx));
static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (jump_block->pred->pred_next
- || jump_block->index == n_basic_blocks - 1
+ || jump_block->next_bb == EXIT_BLOCK_PTR
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = jump_block->succ->dest;
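
The new test trades index arithmetic for the chain sentinel: a block is last exactly when its successor link is the exit block. A minimal sketch of the idiom, assuming only the prev_bb/next_bb fields and the EXIT_BLOCK_PTR sentinel used throughout this patch (the helper name is hypothetical):

/* True iff BB is the last real block.  Unlike comparing bb->index
   against n_basic_blocks - 1, this stays correct when blocks have
   been unlinked without renumbering.  */
static bool
last_real_block_p (bb)
     basic_block bb;
{
  return bb->next_bb == EXIT_BLOCK_PTR;
}
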
static bool
mark_effect (exp, nonequal)
- rtx exp;
- regset nonequal;
+ rtx exp;
+ regset nonequal;
{
int regno;
rtx dest;
{
/* In case we do clobber the register, mark it as equal, as we know the
value is dead so it doesn't have to match. */
- case CLOBBER:
- if (REG_P (XEXP (exp, 0)))
- {
- dest = XEXP (exp, 0);
- regno = REGNO (dest);
- CLEAR_REGNO_REG_SET (nonequal, regno);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
- while (--n > 0)
- CLEAR_REGNO_REG_SET (nonequal, regno + n);
- }
- }
- return false;
+ case CLOBBER:
+ if (REG_P (XEXP (exp, 0)))
+ {
+ dest = XEXP (exp, 0);
+ regno = REGNO (dest);
+ CLEAR_REGNO_REG_SET (nonequal, regno);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ while (--n > 0)
+ CLEAR_REGNO_REG_SET (nonequal, regno + n);
+ }
+ }
+ return false;
- case SET:
- if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
- return false;
- dest = SET_DEST (exp);
- if (dest == pc_rtx)
- return false;
- if (!REG_P (dest))
- return true;
- regno = REGNO (dest);
- SET_REGNO_REG_SET (nonequal, regno);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
- while (--n > 0)
- SET_REGNO_REG_SET (nonequal, regno + n);
- }
+ case SET:
+ if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
return false;
-
- default:
+ dest = SET_DEST (exp);
+ if (dest == pc_rtx)
return false;
+ if (!REG_P (dest))
+ return true;
+ regno = REGNO (dest);
+ SET_REGNO_REG_SET (nonequal, regno);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ while (--n > 0)
+ SET_REGNO_REG_SET (nonequal, regno + n);
+ }
+ return false;
+
+ default:
+ return false;
}
}
return 0;
}
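
Both arms of the switch repeat the walk over the hard register words of a multi-word value (HARD_REGNO_NREGS gives the count). As a hedged illustration only, the idiom could be factored into a helper like this hypothetical one:

/* Set or clear in NONEQUAL every register word occupied by DEST,
   mirroring the CLOBBER and SET arms of mark_effect above.  */
static void
mark_value_regs (nonequal, dest, set)
     regset nonequal;
     rtx dest;
     int set;
{
  int regno = REGNO (dest);
  int n = 1;

  if (regno < FIRST_PSEUDO_REGISTER)
    n = HARD_REGNO_NREGS (regno, GET_MODE (dest));

  while (n-- > 0)
    {
      if (set)
        SET_REGNO_REG_SET (nonequal, regno + n);
      else
        CLEAR_REGNO_REG_SET (nonequal, regno + n);
    }
}
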
/* Attempt to prove that the basic block B will have no side effects and
- allways continues in the same edge if reached via E. Return the edge
+ always continues along the same edge if reached via E. Return the edge
if it exists, NULL otherwise. */
static edge
/* Second branch must end with onlyjump, as we will eliminate the jump. */
if (!any_condjump_p (e->src->end))
return NULL;
-
+
if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
{
BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
return NULL;
/* Ensure that the comparison operators are equivalent.
- ??? This is far too pesimistic. We should allow swapped operands,
+ ??? This is far too pessimistic. We should allow swapped operands,
different CCmodes, or for example interval comparisons that
dominate even when the operands are not equivalent. */
if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn))
- {
- rtx pat = PATTERN (insn);
-
- if (GET_CODE (pat) == PARALLEL)
- {
- for (i = 0; i < XVECLEN (pat, 0); i++)
- failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
- }
- else
- failed |= mark_effect (pat, nonequal);
- }
+ {
+ if (INSN_P (insn))
+ {
+ rtx pat = PATTERN (insn);
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
+ }
+ else
+ failed |= mark_effect (pat, nonequal);
+ }
- cselib_process_insn (insn);
- }
+ cselib_process_insn (insn);
+ }
/* Later we should clear nonequal of dead registers. So far we don't
have life information in cfg_cleanup. */
if (mode & CLEANUP_PRE_LOOP)
{
rtx insn = (target->succ->flags & EDGE_FALLTHRU
- ? target->head : prev_nonnote_insn (target->end));
+ ? target->head : prev_nonnote_insn (target->end));
if (GET_CODE (insn) != NOTE)
insn = NEXT_INSN (insn);
if (GET_CODE (insn) == NOTE)
break;
+
+ /* Do not clean up branches to just past the end of a loop
+ at this time; it can mess up the loop optimizer's
+ recognition of some patterns. */
+
+ insn = PREV_INSN (target->head);
+ if (insn && GET_CODE (insn) == NOTE
+ && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
+ break;
}
counter++;
target = new_target;
threaded |= new_target_threaded;
- }
+ }
if (counter >= n_basic_blocks)
{
{
notice_new_block (redirect_edge_and_branch_force (e, target));
if (rtl_dump_file)
- fprintf (rtl_dump_file, "Conditionals threaded.\n");
+ fprintf (rtl_dump_file, "Conditionals threaded.\n");
}
else if (!redirect_edge_and_branch (e, target))
{
&& first == threaded_edges [n]->src)
n++;
t = first->succ;
- }
+ }
t->count -= edge_count;
if (t->count < 0)
basic_block a, b;
{
rtx barrier;
- int index;
barrier = next_nonnote_insn (a->end);
if (GET_CODE (barrier) != BARRIER)
fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
a->index, b->index);
- /* Swap the records for the two blocks around. Although we are deleting B,
- A is now where B was and we want to compact the BB array from where
- A used to be. */
- BASIC_BLOCK (a->index) = b;
- BASIC_BLOCK (b->index) = a;
- index = a->index;
- a->index = b->index;
- b->index = index;
+ /* Relink the chain so that A immediately precedes B. */
+
+ unlink_block (a);
+ link_block (a, b->prev_bb);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
- b_index, c_index);
+ b_index, c_index);
return true;
}
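
unlink_block and link_block are the chain primitives the hunk above relies on; a sketch of their expected behaviour on the doubly linked prev_bb/next_bb chain (the authoritative definitions belong in cfg.c, not here):

/* Detach B from the block chain.  */
static void
unlink_block_sketch (b)
     basic_block b;
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
}

/* Reinsert B immediately after AFTER.  */
static void
link_block_sketch (b, after)
     basic_block b, after;
{
  b->prev_bb = after;
  b->next_bb = after->next_bb;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}
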
static bool
insns_match_p (mode, i1, i2)
- int mode ATTRIBUTE_UNUSED;
- rtx i1, i2;
+ int mode ATTRIBUTE_UNUSED;
+ rtx i1, i2;
{
rtx p1, p2;
equal, they were constructed identically. */
if (GET_CODE (i1) == CALL_INSN
- && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
- CALL_INSN_FUNCTION_USAGE (i2)))
+ && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
+ CALL_INSN_FUNCTION_USAGE (i2))
+ || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
return false;
#ifdef STACK_REGS
remove_note (i1, equiv1);
remove_note (i2, equiv2);
}
-
+
afterlast1 = last1, afterlast2 = last2;
last1 = i1, last2 = i2;
- ninsns++;
+ ninsns++;
}
i1 = PREV_INSN (i1);
enum rtx_code code1, code2;
if (!bb2->succ
- || !bb2->succ->succ_next
+ || !bb2->succ->succ_next
|| bb2->succ->succ_next->succ_next
|| !any_condjump_p (bb2->end)
|| !onlyjump_p (bb2->end))
/* Do not crossjump across loop boundaries. This is a temporary
workaround for the common scenario in which crossjumping results
in killing the duplicated loop condition, making bb-reorder rotate
- the loop incorectly, leaving an extra unconditional jump inside
+ the loop incorrectly, leaving an extra unconditional jump inside
the loop.
This check should go away once bb-reorder knows how to duplicate
roughly similar. */
if (match
&& !optimize_size
- && bb1->frequency > BB_FREQ_MAX / 1000
- && bb2->frequency > BB_FREQ_MAX / 1000)
+ && maybe_hot_bb_p (bb1)
+ && maybe_hot_bb_p (bb2))
{
int prob2;
return match;
}
- /* Generic case - we are seeing an computed jump, table jump or trapping
+ /* Generic case - we are seeing a computed jump, table jump or trapping
instruction. */
/* First ensure that the instructions match. There may be many outgoing
if (fallthru1)
{
basic_block d1 = (forwarder_block_p (fallthru1->dest)
- ? fallthru1->dest->succ->dest: fallthru1->dest);
+ ? fallthru1->dest->succ->dest : fallthru1->dest);
basic_block d2 = (forwarder_block_p (fallthru2->dest)
- ? fallthru2->dest->succ->dest: fallthru2->dest);
+ ? fallthru2->dest->succ->dest : fallthru2->dest);
if (d1 != d2)
return false;
{
int nmatch;
basic_block src1 = e1->src, src2 = e2->src;
- basic_block redirect_to;
+ basic_block redirect_to, redirect_from, to_remove;
rtx newpos1, newpos2;
edge s;
- rtx last;
- rtx label;
/* Search backward through forwarder blocks. We don't need to worry
about multiple entry or chained forwarders, as they will be optimized
if (GET_CODE (newpos1) == NOTE)
newpos1 = NEXT_INSN (newpos1);
- last = src1->end;
-
- /* Emit the jump insn. */
- label = block_label (redirect_to);
- emit_jump_insn_after (gen_jump (label), src1->end);
- JUMP_LABEL (src1->end) = label;
- LABEL_NUSES (label)++;
- /* Delete the now unreachable instructions. */
- delete_insn_chain (newpos1, last);
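+ /* Split SRC1 before the matched sequence; the tail block then holds
+ the duplicate insns and is deleted once its edge is retargeted. */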
+ redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
+ to_remove = redirect_from->succ->dest;
- /* Make sure there is a barrier after the new jump. */
- last = next_nonnote_insn (src1->end);
- if (!last || GET_CODE (last) != BARRIER)
- emit_barrier_after (src1->end);
+ redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
+ flow_delete_block (to_remove);
- /* Update CFG. */
- while (src1->succ)
- remove_edge (src1->succ);
- make_single_succ_edge (src1, redirect_to, 0);
-
- update_forwarder_flag (src1);
+ update_forwarder_flag (redirect_from);
return true;
}
try_optimize_cfg (mode)
int mode;
{
- int i;
bool changed_overall = false;
bool changed;
int iterations = 0;
+ basic_block bb, b;
if (mode & CLEANUP_CROSSJUMP)
add_noreturn_fake_exit_edges ();
- for (i = 0; i < n_basic_blocks; i++)
- update_forwarder_flag (BASIC_BLOCK (i));
+ FOR_EACH_BB (bb)
+ update_forwarder_flag (bb);
if (mode & CLEANUP_UPDATE_LIFE)
clear_bb_flags ();
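
FOR_EACH_BB hides the walk over the new block chain. Its assumed expansion, consistent with the explicit loops later in this patch (the authoritative definition lives in basic-block.h):

/* Assumed shape of the iterator used above.  */
#define FOR_EACH_BB_SKETCH(BB) \
  for (BB = ENTRY_BLOCK_PTR->next_bb; BB != EXIT_BLOCK_PTR; \
       BB = BB->next_bb)
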
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
- for (i = 0; i < n_basic_blocks;)
+ for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
{
- basic_block c, b = BASIC_BLOCK (i);
+ basic_block c;
edge s;
bool changed_here = false;
/* Delete trivially dead basic blocks. */
while (b->pred == NULL)
{
- c = BASIC_BLOCK (b->index - 1);
+ c = b->prev_bb;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleting block %i.\n",
b->index);
"Deleting fallthru block %i.\n",
b->index);
- c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
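+ /* Continue the scan from B's predecessor, or from its successor
+ when B is the first block after the entry. */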
+ c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
redirect_edge_succ_nodup (b->pred, b->succ->dest);
flow_delete_block (b);
changed = true;
&& !(s->flags & EDGE_COMPLEX)
&& (c = s->dest) != EXIT_BLOCK_PTR
&& c->pred->pred_next == NULL
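+ /* A block that is its own successor is not a candidate. */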
+ && b != c
/* If the jump insn has side effects,
we can't kill the edge. */
&& (GET_CODE (b->end) != JUMP_INSN
/* Don't get confused by the index shift caused by
deleting blocks. */
if (!changed_here)
- i = b->index + 1;
+ b = b->next_bb;
else
changed = true;
}
\f
/* Delete all unreachable basic blocks. */
-static bool
+bool
delete_unreachable_blocks ()
{
- int i, j;
bool changed = false;
+ basic_block b, next_bb;
find_unreachable_blocks ();
- /* Delete all unreachable basic blocks. Do compaction concurrently,
- as otherwise we can wind up with O(N^2) behaviour here when we
- have oodles of dead code. */
+ /* Delete all unreachable basic blocks. */
- for (i = j = 0; i < n_basic_blocks; ++i)
+ for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
{
- basic_block b = BASIC_BLOCK (i);
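+ /* Fetch the chain link first; B may be freed below. */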
+ next_bb = b->next_bb;
if (!(b->flags & BB_REACHABLE))
{
- flow_delete_block_noexpunge (b);
- expunge_block_nocompact (b);
+ flow_delete_block (b);
changed = true;
}
- else
- {
- BASIC_BLOCK (j) = b;
- b->index = j++;
- }
}
- n_basic_blocks = j;
- basic_block_info->num_elements = j;
if (changed)
tidy_fallthru_edges ();
{
changed = true;
/* We've possibly created trivially dead code. Clean it up right
- now to introduce more oppurtunities for try_optimize_cfg. */
- if (!(mode & (CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
+ now to introduce more opportunities for try_optimize_cfg. */
+ if (!(mode & (CLEANUP_NO_INSN_DEL
+ | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
&& !reload_completed)
delete_trivially_dead_insns (get_insns(), max_reg_num ());
}
+
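+ /* Deleting blocks no longer compacts the numbering; renumber the
+ chain here so block indices are dense again. */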
+ compact_blocks ();
+
while (try_optimize_cfg (mode))
{
delete_unreachable_blocks (), changed = true;
if (mode & CLEANUP_UPDATE_LIFE)
{
- /* Cleaning up CFG introduces more oppurtunities for dead code
- removal that in turn may introduce more oppurtunities for
+ /* Cleaning up CFG introduces more opportunities for dead code
+ removal that in turn may introduce more opportunities for
cleaning up the CFG. */
if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
PROP_DEATH_NOTES
| PROP_LOG_LINKS))
break;
}
- else if (!(mode & CLEANUP_PRE_SIBCALL) && !reload_completed)
+ else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
+ && !reload_completed)
{
if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
break;
/* Kill the data we won't maintain. */
free_EXPR_LIST_list (&label_value_list);
- free_EXPR_LIST_list (&tail_recursion_label_list);
timevar_pop (TV_CLEANUP_CFG);
return changed;