#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "hash-set.h"
-#include "machmode.h"
-#include "vec.h"
-#include "double-int.h"
-#include "input.h"
-#include "alias.h"
-#include "symtab.h"
-#include "options.h"
-#include "wide-int.h"
-#include "inchash.h"
+#include "backend.h"
+#include "hard-reg-set.h"
#include "tree.h"
+#include "gimple.h"
+#include "cfghooks.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "alias.h"
#include "fold-const.h"
#include "flags.h"
-#include "predict.h"
-#include "tm.h"
-#include "hard-reg-set.h"
-#include "input.h"
-#include "function.h"
-#include "dominance.h"
-#include "cfg.h"
#include "cfganal.h"
-#include "basic-block.h"
-#include "hash-table.h"
-#include "tree-ssa-alias.h"
#include "internal-fn.h"
-#include "gimple-expr.h"
-#include "is-a.h"
-#include "gimple.h"
#include "gimple-iterator.h"
-#include "gimple-ssa.h"
-#include "tree-phinodes.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
-#include "ssa-iterators.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "dbgcnt.h"
#include "tree-cfg.h"
-#include "tree-pass.h"
/* Given a block B, update the CFG and SSA graph to reflect redirecting
one or more in-edges to B to instead reach the destination of an
may have many incoming edges threaded to the same outgoing edge. This
can be naturally implemented with a hash table. */
-struct redirection_data : typed_free_remove<redirection_data>
+struct redirection_data : free_ptr_hash<redirection_data>
{
/* We support wiring up two block duplicates in a jump threading path.
struct el *incoming_edges;
/* hash_table support. */
- typedef redirection_data *value_type;
- typedef redirection_data *compare_type;
static inline hashval_t hash (const redirection_data *);
static inline int equal (const redirection_data *, const redirection_data *);
};
return true;
}
+/* Rather than search all the edges in jump thread paths each time
+ DOM is able to simplify a control statement, we build a hash table
+ with the deleted edges. We only care about the address of the edge,
+ not its contents. */
+struct removed_edges : nofree_ptr_hash<edge_def>
+{
+  /* Hash on the edge's address only; contents are irrelevant here. */
+ static hashval_t hash (edge e) { return htab_hash_pointer (e); }
+ static bool equal (edge e1, edge e2) { return e1 == e2; }
+};
+
+/* Set of edges removed by the threader's callers.  Lazily allocated by
+   remove_jump_threads_including and consumed (then freed) when all
+   queued jump threads are processed.  */
+static hash_table<removed_edges> *removed_edges;
+
/* Data structure of information to pass to hash table traversal routines. */
struct ssa_local_info_t
{
Also remove all outgoing edges except the edge which reaches DEST_BB.
If DEST_BB is NULL, then remove all outgoing edges. */
-static void
+void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
gimple_stmt_iterator gsi;
else
ei_next (&ei);
}
+
+ /* If the remaining edge is a loop exit, there must have
+ been a removed edge that was not a loop exit.
+
+ In that case BB and possibly other blocks were previously
+ in the loop, but are now outside the loop. Thus, we need
+ to update the loop structures. */
+ if (single_succ_p (bb)
+ && loop_outer (bb->loop_father)
+ && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
+ loops_state_set (LOOPS_NEED_FIXUP);
}
/* Create a duplicate of BB. Record the duplicate block in an array
For example, assume we have the following control flow and identified
jump threading paths:
- A B C
- \ | /
- Ea \ |Eb / Ec
- \ | /
- v v v
- J <-- Joiner
- / \
- Eoff/ \Eon
- / \
- v v
- Soff Son <--- Normal
- /\
- Ed/ \ Ee
- / \
- v v
- D E
-
- Jump threading paths: A -> J -> Son -> D (path 1)
- C -> J -> Son -> E (path 2)
+ A B C
+ \ | /
+ Ea \ |Eb / Ec
+ \ | /
+ v v v
+ J <-- Joiner
+ / \
+ Eoff/ \Eon
+ / \
+ v v
+ Soff Son <--- Normal
+ /\
+ Ed/ \ Ee
+ / \
+ v v
+ D E
+
+ Jump threading paths: A -> J -> Son -> D (path 1)
+ C -> J -> Son -> E (path 2)
Note that the control flow could be more complicated:
- Each jump threading path may have more than one incoming edge. I.e. A and
In the above example, after all jump threading is complete, we will
end up with the following control flow:
- A B C
- | | |
- Ea| |Eb |Ec
- | | |
- v v v
- Ja J Jc
- / \ / \Eon' / \
- Eona/ \ ---/---\-------- \Eonc
- / \ / / \ \
- v v v v v
- Sona Soff Son Sonc
- \ /\ /
- \___________ / \ _____/
- \ / \/
- vv v
- D E
+ A B C
+ | | |
+ Ea| |Eb |Ec
+ | | |
+ v v v
+ Ja J Jc
+ / \ / \Eon' / \
+ Eona/ \ ---/---\-------- \Eonc
+ / \ / / \ \
+ v v v v v
+ Sona Soff Son Sonc
+ \ /\ /
+ \___________ / \ _____/
+ \ / \/
+ vv v
+ D E
The main issue to notice here is that when we are processing path 1
(A->J->Son->D) we need to figure out the outgoing edge weights to
static bool
compute_path_counts (struct redirection_data *rd,
- ssa_local_info_t *local_info,
- gcov_type *path_in_count_ptr,
- gcov_type *path_out_count_ptr,
- int *path_in_freq_ptr)
+ ssa_local_info_t *local_info,
+ gcov_type *path_in_count_ptr,
+ gcov_type *path_out_count_ptr,
+ int *path_in_freq_ptr)
{
edge e = rd->incoming_edges->e;
vec<jump_thread_edge *> *path = THREAD_PATH (e);
/* Start by accumulating incoming edge counts to the path's first bb
into a couple buckets:
- path_in_count: total count of incoming edges that flow into the
- current path.
- nonpath_count: total count of incoming edges that are not
- flowing along *any* path. These are the counts
- that will still flow along the original path after
- all path duplication is done by potentially multiple
- calls to this routine.
+ path_in_count: total count of incoming edges that flow into the
+ current path.
+ nonpath_count: total count of incoming edges that are not
+ flowing along *any* path. These are the counts
+ that will still flow along the original path after
+ all path duplication is done by potentially multiple
+ calls to this routine.
(any other incoming edge counts are for a different jump threading
path that will be handled by a later call to this routine.)
To make this easier, start by recording all incoming edges that flow into
vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
/* Simply check the incoming edge src against the set captured above. */
if (ein_path
- && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
- {
- /* It is necessary but not sufficient that the last path edges
- are identical. There may be different paths that share the
- same last path edge in the case where the last edge has a nocopy
- source block. */
- gcc_assert (ein_path->last ()->e == elast);
- path_in_count += ein->count;
- path_in_freq += EDGE_FREQUENCY (ein);
- }
+ && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
+ {
+ /* It is necessary but not sufficient that the last path edges
+ are identical. There may be different paths that share the
+ same last path edge in the case where the last edge has a nocopy
+ source block. */
+ gcc_assert (ein_path->last ()->e == elast);
+ path_in_count += ein->count;
+ path_in_freq += EDGE_FREQUENCY (ein);
+ }
else if (!ein_path)
- {
- /* Keep track of the incoming edges that are not on any jump-threading
- path. These counts will still flow out of original path after all
- jump threading is complete. */
- nonpath_count += ein->count;
- }
+ {
+ /* Keep track of the incoming edges that are not on any jump-threading
+ path. These counts will still flow out of original path after all
+ jump threading is complete. */
+ nonpath_count += ein->count;
+ }
}
/* This is needed due to insane incoming frequencies. */
edge epath = (*path)[i]->e;
gcov_type cur_count = epath->count;
if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
- {
- has_joiner = true;
- cur_count = apply_probability (cur_count, onpath_scale);
- }
+ {
+ has_joiner = true;
+ cur_count = apply_probability (cur_count, onpath_scale);
+ }
/* In the joiner case we need to update nonpath_count for any edges
- coming into the path that will contribute to the count flowing
- into the path successor. */
+ coming into the path that will contribute to the count flowing
+ into the path successor. */
if (has_joiner && epath != elast)
{
- /* Look for other incoming edges after joiner. */
- FOR_EACH_EDGE (ein, ei, epath->dest->preds)
- {
- if (ein != epath
- /* Ignore in edges from blocks we have duplicated for a
- threading path, which have duplicated edge counts until
- they are redirected by an invocation of this routine. */
- && !bitmap_bit_p (local_info->duplicate_blocks,
- ein->src->index))
- nonpath_count += ein->count;
- }
+ /* Look for other incoming edges after joiner. */
+ FOR_EACH_EDGE (ein, ei, epath->dest->preds)
+ {
+ if (ein != epath
+ /* Ignore in edges from blocks we have duplicated for a
+ threading path, which have duplicated edge counts until
+ they are redirected by an invocation of this routine. */
+ && !bitmap_bit_p (local_info->duplicate_blocks,
+ ein->src->index))
+ nonpath_count += ein->count;
+ }
}
if (cur_count < path_out_count)
- path_out_count = cur_count;
+ path_out_count = cur_count;
if (epath->count < min_path_count)
- min_path_count = epath->count;
+ min_path_count = epath->count;
}
/* We computed path_out_count above assuming that this path targeted
and the duplicate edge EDUP will have a count of PATH_OUT_COUNT. */
static void
update_profile (edge epath, edge edup, gcov_type path_in_count,
- gcov_type path_out_count, int path_in_freq)
+ gcov_type path_out_count, int path_in_freq)
{
/* First update the duplicated block's count / frequency. */
FOR_EACH_EDGE (esucc, ei, bb->succs)
{
if (!bb->count)
- continue;
+ continue;
/* Prevent overflow computation due to insane profiles. */
if (esucc->count < bb->count)
- esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
- bb->count);
+ esucc->probability = GCOV_COMPUTE_SCALE (esucc->count,
+ bb->count);
else
- /* Can happen with missing/guessed probabilities, since we
- may determine that more is flowing along duplicated
- path than joiner succ probabilities allowed.
- Counts and freqs will be insane after jump threading,
- at least make sure probability is sane or we will
- get a flow verification error.
- Not much we can do to make counts/freqs sane without
- redoing the profile estimation. */
- esucc->probability = REG_BR_PROB_BASE;
+ /* Can happen with missing/guessed probabilities, since we
+ may determine that more is flowing along duplicated
+ path than joiner succ probabilities allowed.
+ Counts and freqs will be insane after jump threading,
+ at least make sure probability is sane or we will
+ get a flow verification error.
+ Not much we can do to make counts/freqs sane without
+ redoing the profile estimation. */
+ esucc->probability = REG_BR_PROB_BASE;
}
}
static void
update_joiner_offpath_counts (edge epath, basic_block dup_bb,
- gcov_type path_in_count,
- gcov_type path_out_count)
+ gcov_type path_in_count,
+ gcov_type path_out_count)
{
/* Compute the count that currently flows off path from the joiner.
In other words, the total count of joiner's out edges other than
FOR_EACH_EDGE (enonpath, ei, epath->src->succs)
{
if (enonpath == epath)
- continue;
+ continue;
total_orig_off_path_count += enonpath->count;
}
{
/* Look for edges going off of the threading path. */
if (enonpath == epath)
- continue;
+ continue;
/* Find the corresponding edge out of the duplicated joiner. */
edge enonpathdup = find_edge (dup_bb, enonpath->dest);
gcc_assert (enonpathdup);
/* We can't use the original probability of the joiner's out
- edges, since the probabilities of the original branch
- and the duplicated branches may vary after all threading is
- complete. But apportion the duplicated joiner's off-path
- total edge count computed earlier (total_dup_off_path_count)
- among the duplicated off-path edges based on their original
- ratio to the full off-path count (total_orig_off_path_count).
- */
+ edges, since the probabilities of the original branch
+ and the duplicated branches may vary after all threading is
+ complete. But apportion the duplicated joiner's off-path
+ total edge count computed earlier (total_dup_off_path_count)
+ among the duplicated off-path edges based on their original
+ ratio to the full off-path count (total_orig_off_path_count).
+ */
int scale = GCOV_COMPUTE_SCALE (enonpath->count,
- total_orig_off_path_count);
+ total_orig_off_path_count);
/* Give the duplicated offpath edge a portion of the duplicated
- total. */
+ total. */
enonpathdup->count = apply_scale (scale,
- total_dup_off_path_count);
+ total_dup_off_path_count);
/* Now update the original offpath edge count, handling underflow
- due to rounding errors. */
+ due to rounding errors. */
enonpath->count -= enonpathdup->count;
if (enonpath->count < 0)
- enonpath->count = 0;
+ enonpath->count = 0;
}
}
FOR_EACH_EDGE (ein, ei, e->dest->preds)
{
if (ein->count)
- return false;
+ return false;
non_zero_freq |= ein->src->frequency != 0;
}
{
edge epath = (*path)[i]->e;
if (epath->src->count)
- return false;
+ return false;
non_zero_freq |= epath->src->frequency != 0;
edge esucc;
FOR_EACH_EDGE (esucc, ei, epath->src->succs)
- {
- if (esucc->count)
- return false;
- non_zero_freq |= esucc->src->frequency != 0;
- }
+ {
+ if (esucc->count)
+ return false;
+ non_zero_freq |= esucc->src->frequency != 0;
+ }
}
return non_zero_freq;
}
FOR_EACH_EDGE (ein, ei, e->dest->preds)
{
/* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
- errors applying the probability when the frequencies are very
- small. */
+ errors applying the probability when the frequencies are very
+ small. */
ein->count = apply_probability (ein->src->frequency * REG_BR_PROB_BASE,
- ein->probability);
+ ein->probability);
}
for (unsigned int i = 1; i < path->length (); i++)
edge epath = (*path)[i]->e;
edge esucc;
/* Scale up the frequency by REG_BR_PROB_BASE, to avoid rounding
- errors applying the edge probability when the frequencies are very
- small. */
+ errors applying the edge probability when the frequencies are very
+ small. */
epath->src->count = epath->src->frequency * REG_BR_PROB_BASE;
FOR_EACH_EDGE (esucc, ei, epath->src->succs)
- esucc->count = apply_probability (esucc->src->count,
- esucc->probability);
+ esucc->count = apply_probability (esucc->src->count,
+ esucc->probability);
}
}
{
edge epath = (*path)[i]->e;
FOR_EACH_EDGE (esucc, ei, epath->src->succs)
- esucc->count = 0;
+ esucc->count = 0;
epath->src->count = 0;
}
/* Also need to clear the counts along duplicated path. */
{
basic_block dup = rd->dup_blocks[i];
if (!dup)
- continue;
+ continue;
FOR_EACH_EDGE (esucc, ei, dup->succs)
- esucc->count = 0;
+ esucc->count = 0;
dup->count = 0;
}
}
to see if the paths through RD are using estimated frequencies because
the routine had zero profile counts. */
bool do_freqs_to_counts = (profile_status_for_fn (cfun) != PROFILE_READ
- || estimated_freqs_path (rd));
+ || estimated_freqs_path (rd));
if (do_freqs_to_counts)
freqs_to_counts_path (rd);
non-joiner case the path_in_count and path_out_count should be the
same. */
bool has_joiner = compute_path_counts (rd, local_info,
- &path_in_count, &path_out_count,
- &path_in_freq);
+ &path_in_count, &path_out_count,
+ &path_in_freq);
int cur_path_freq = path_in_freq;
for (unsigned int count = 0, i = 1; i < path->length (); i++)
edge victim;
edge e2;
- gcc_assert (has_joiner);
+ gcc_assert (has_joiner);
/* This updates the PHIs at the destination of the duplicate
block. Pass 0 instead of i if we are threading a path which
/* Next we need to update the counts of the original and duplicated
edges from the joiner that go off path. */
update_joiner_offpath_counts (epath, e2->src, path_in_count,
- path_out_count);
+ path_out_count);
/* Finally, we need to set the probabilities on the duplicated
edges out of the duplicated joiner (e2->src). The probabilities
cur_path_freq);
}
else
- {
+ {
/* No copy case. In this case we don't have an equivalent block
on the duplicated thread path to update, but we do need
to remove the portion of the counts/freqs that were moved
}
/* Increment the index into the duplicated path when we processed
- a duplicated block. */
+ a duplicated block. */
if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
- || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
+ || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
{
count++;
}
|| (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
{
create_block_for_threading ((*path)[i]->e->src, rd, 1,
- &local_info->duplicate_blocks);
+ &local_info->duplicate_blocks);
break;
}
}
if (local_info->template_block == NULL)
{
create_block_for_threading ((*path)[1]->e->src, rd, 0,
- &local_info->duplicate_blocks);
+ &local_info->duplicate_blocks);
local_info->template_block = rd->dup_blocks[0];
/* We do not create any outgoing edges for the template. We will
else
{
create_block_for_threading (local_info->template_block, rd, 0,
- &local_info->duplicate_blocks);
+ &local_info->duplicate_blocks);
/* Go ahead and wire up outgoing edges and update PHIs for the duplicate
block. */
struct redirection_data *rd = *slot;
struct el *next, *el;
- /* Walk over all the incoming edges associated associated with this
- hash table entry. */
+ /* Walk over all the incoming edges associated with this hash table
+ entry. */
for (el = rd->incoming_edges; el; el = next)
{
edge e = el->e;
cases where the second path starts at a downstream edge on the same
path). First record all joiner paths, deleting any in the unexpected
case where there is already a path for that incoming edge. */
- for (i = 0; i < paths.length (); i++)
+ for (i = 0; i < paths.length ();)
{
vec<jump_thread_edge *> *path = paths[i];
if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
- {
+ {
/* Attach the path to the starting edge if none is yet recorded. */
- if ((*path)[0]->e->aux == NULL)
+ if ((*path)[0]->e->aux == NULL)
{
- (*path)[0]->e->aux = path;
+ (*path)[0]->e->aux = path;
+ i++;
}
else
{
paths.unordered_remove (i);
if (dump_file && (dump_flags & TDF_DETAILS))
- dump_jump_thread_path (dump_file, *path, false);
+ dump_jump_thread_path (dump_file, *path, false);
delete_jump_thread_path (path);
}
- }
+ }
+ else
+ {
+ i++;
+ }
}
+
/* Second, look for paths that have any other jump thread attached to
them, and either finish converting them or cancel them. */
- for (i = 0; i < paths.length (); i++)
+ for (i = 0; i < paths.length ();)
{
vec<jump_thread_edge *> *path = paths[i];
edge e = (*path)[0]->e;
/* If we iterated through the entire path without exiting the loop,
then we are good to go, record it. */
if (j == path->length ())
- bitmap_set_bit (tmp, e->dest->index);
+ {
+ bitmap_set_bit (tmp, e->dest->index);
+ i++;
+ }
else
{
e->aux = NULL;
paths.unordered_remove (i);
if (dump_file && (dump_flags & TDF_DETAILS))
- dump_jump_thread_path (dump_file, *path, false);
+ dump_jump_thread_path (dump_file, *path, false);
delete_jump_thread_path (path);
}
}
+ else
+ {
+ i++;
+ }
}
/* If optimizing for size, only thread through block if we don't have
static bool
bb_ends_with_multiway_branch (basic_block bb ATTRIBUTE_UNUSED)
{
- gimple stmt = last_stmt (bb);
+ gimple *stmt = last_stmt (bb);
if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
return true;
if (stmt && gimple_code (stmt) == GIMPLE_GOTO
scale_bbs_frequencies_int (region_copy, n_region, entry_freq, total_freq);
}
-#ifdef ENABLE_CHECKING
- verify_jump_thread (region_copy, n_region);
-#endif
+ if (flag_checking)
+ verify_jump_thread (region_copy, n_region);
/* Remove the last branch in the jump thread path. */
remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
+
+ /* And fixup the flags on the single remaining edge. */
+ edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
+ fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
+ fix_e->flags |= EDGE_FALLTHRU;
+
edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
if (e) {
valid_jump_thread_path (vec<jump_thread_edge *> *path)
{
unsigned len = path->length ();
+ bool multiway_branch = false;
- /* Check that the path is connected. */
+ /* Check that the path is connected and see if there's a multi-way
+ branch on the path. */
for (unsigned int j = 0; j < len - 1; j++)
- if ((*path)[j]->e->dest != (*path)[j+1]->e->src)
- return false;
+ {
+ if ((*path)[j]->e->dest != (*path)[j+1]->e->src)
+ return false;
+ gimple *last = last_stmt ((*path)[j]->e->dest);
+ multiway_branch |= (last && gimple_code (last) == GIMPLE_SWITCH);
+ }
+
+ /* If we are trying to thread the loop latch to a block that does
+ not dominate the loop latch, then that will create an irreducible
+ loop. We avoid that unless the jump thread has a multi-way
+ branch, in which case we have deemed it worth losing other
+ loop optimizations later if we can eliminate the multi-way branch. */
+ edge e = (*path)[0]->e;
+ struct loop *loop = e->dest->loop_father;
+ if (!multiway_branch
+ && loop->latch
+ && loop_latch_edge (loop) == e
+ && (determine_bb_domination_status (loop, path->last ()->e->dest)
+ == DOMST_NONDOMINATING))
+ return false;
return true;
}
+/* Remove any queued jump threads that include edge E.
+
+ We don't actually remove them here, just record the edges into a
+ hash table. That way we can do the search once per iteration of
+ DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR. */
+
+void
+remove_jump_threads_including (edge_def *e)
+{
+  /* No queued jump threads means nothing can reference E.  */
+ if (!paths.exists ())
+ return;
+
+  /* Allocate the table lazily; most passes never remove an edge.  */
+ if (!removed_edges)
+ removed_edges = new hash_table<struct removed_edges> (17);
+
+  /* Record E's address; queued paths touching it are discarded later,
+     when the recorded set is scanned and the table is freed.  */
+ edge *slot = removed_edges->find_slot (e, INSERT);
+ *slot = e;
+}
+
/* Walk through all blocks and thread incoming edges to the appropriate
outgoing edge for each edge pair recorded in THREADED_EDGES.
struct loop *loop;
if (!paths.exists ())
- return false;
+ {
+ retval = false;
+ goto out;
+ }
threaded_blocks = BITMAP_ALLOC (NULL);
memset (&thread_stats, 0, sizeof (thread_stats));
+ /* Remove any paths that referenced removed edges. */
+ if (removed_edges)
+ for (i = 0; i < paths.length (); )
+ {
+ unsigned int j;
+ vec<jump_thread_edge *> *path = paths[i];
+
+ for (j = 0; j < path->length (); j++)
+ {
+ edge e = (*path)[j]->e;
+ if (removed_edges->find_slot (e, NO_INSERT))
+ break;
+ }
+
+ if (j != path->length ())
+ {
+ delete_jump_thread_path (path);
+ paths.unordered_remove (i);
+ continue;
+ }
+ i++;
+ }
+
/* Jump-thread all FSM threads before other jump-threads. */
for (i = 0; i < paths.length ();)
{
if (bitmap_bit_p (threaded_blocks, entry->src->index)
/* Verify that the jump thread path is still valid: a
previous jump-thread may have changed the CFG, and
- invalidated the current path. */
+ invalidated the current path or the requested jump
+ thread might create irreducible loops which should
+ generally be avoided. */
|| !valid_jump_thread_path (path))
{
/* Remove invalid FSM jump-thread paths. */
free_dominance_info (CDI_DOMINATORS);
bitmap_set_bit (threaded_blocks, entry->src->index);
retval = true;
+ thread_stats.num_threaded_edges++;
}
delete_jump_thread_path (path);
}
/* Our path is still valid, thread it. */
- if (e->aux)
+ if (e->aux)
{
if (thread_block ((*path)[0]->e->dest, false))
e->aux = NULL;
else
{
- delete_jump_thread_path (path);
+ delete_jump_thread_path (path);
e->aux = NULL;
ei_next (&ei);
}
if (retval)
loops_state_set (LOOPS_NEED_FIXUP);
+ out:
+ delete removed_edges;
+ removed_edges = NULL;
return retval;
}