+2005-12-17 Danny Berlin <dberlin@dberlin.org>
+ Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ * basic-block.h: Changed basic block numbering so that the entry
+ block is 0 and the exit block is 1. Changed insn iterators so
+ that they are tolerant of blocks with no insns.
+ * regrename.c (copyprop_hardreg_forward): Changed basic block
+ numbering so that the entry block is 0 and the exit block is 1.
+	* sched-ebb.c (schedule_ebbs): Ditto.
+	* tracer.c (ignore_bb_p, tracer): Ditto.
+ * cfgloopmanip.c (fix_loop_structure): Ditto.
+ * cfghooks.c (verify_flow_info): Ditto.
+ * cfg.c (compact_blocks): Ditto.
+ * reorg.c (dbr_schedule): Ditto.
+	* flow.c (calculate_global_regs_live, libcall_dead_p,
+	regno_clobbered_at_setjmp): Ditto.
+ * dominance.c (calc_dfs_tree_nonrec, calc_dfs_tree,
+ calculate_dominance_info): Ditto.
+ * cfganal.c (create_edge_list, print_edge_list,
+ flow_depth_first_order_compute, flow_dfs_compute_reverse_init,
+ flow_dfs_compute_reverse_add_bb, flow_dfs_compute_reverse_execute,
+ dfs_enumerate_from): Ditto.
+ * global.c (global_alloc, set_up_bb_rts_numbers): Ditto.
+ * ifcvt.c (find_if_case_2): Ditto.
+ * cfgbuild.c (control_flow_insn_p, count_basic_blocks,
+ find_basic_blocks): Ditto.
+ * predict.c (predict_loops, tree_bb_level_predictions,
+ predict_paths_leading_to, propagate_freq): Ditto.
+ * lcm.c (compute_antinout_edge, compute_laterin,
+ compute_available): Ditto.
+ * function.c (thread_prologue_and_epilogue_insns): Ditto.
+ * gcse.c (gcse_main, bypass_jumps): Ditto.
+ * profile.c (compute_branch_probabilities,
+ compute_value_histograms, branch_prob): Ditto.
+ * tree-flow-inline.h (bsi_start, bsi_after_labels,
+ bsi_last): Ditto.
+ * tree-ssa-phiopt.c (tree_ssa_phiopt,
+ blocks_in_phiopt_order): Ditto.
+ * bt-load.c (compute_defs_uses_and_gen, compute_kill,
+ compute_out, link_btr_uses, migrate_btr_defs): Ditto.
+ * tree-dfa.c (collect_dfa_stats): Ditto.
+ * cfgcleanup.c (try_forward_edges, try_optimize_cfg): Ditto.
+ * cfglayout.c (fixup_reorder_chain): Ditto.
+ * bb-reorder.c (reorder_basic_blocks, duplicate_computed_gotos,
+ partition_hot_cold_basic_blocks): Ditto.
+ * var-tracking.c (vt_find_locations): Ditto.
+ * cfgloop.c (flow_loops_cfg_dump, flow_loops_find, get_loop_body): Ditto.
+ * sched-rgn.c (compute_trg_info, init_regions, schedule_insns): Ditto.
+	* tree-cfg.c (init_empty_tree_cfg, build_tree_cfg, make_edges,
+	label_to_block_fn, print_loop_ir, tree_flow_call_edges_add): Ditto.
+ * tree-ssa-reassoc.c (init_reassoc): Ditto.
+	* cfgrtl.c (entry_of_function, rtl_verify_flow_info,
+	rtl_flow_call_edges_add): Ditto.
+	* df.c (df_analyze_1, hybrid_search, iterative_dataflow): Ditto,
+	and removed the unused inverse order maps.
+	* df.h (struct df): Removed the unused inverse order maps.
+	* combine.c: Fix comment typo.
+
2005-12-17 Jan Hubicka <jh@suse.cz>
* tree-flow-inline.h (set_default_def, default_def): Kill.
/* For iterating over insns in basic block. */
#define FOR_BB_INSNS(BB, INSN) \
for ((INSN) = BB_HEAD (BB); \
- (INSN) != NEXT_INSN (BB_END (BB)); \
+ (INSN) && (INSN) != NEXT_INSN (BB_END (BB)); \
(INSN) = NEXT_INSN (INSN))
#define FOR_BB_INSNS_REVERSE(BB, INSN) \
for ((INSN) = BB_END (BB); \
- (INSN) != PREV_INSN (BB_HEAD (BB)); \
+ (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB)); \
(INSN) = PREV_INSN (INSN))
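With the entry and exit blocks now present in the block array but
carrying no insns, BB_HEAD/BB_END can be NULL, and the added
"(INSN) &&" guard makes the iterators simply run zero times on such a
block.  A minimal sketch of the intended use (illustrative only; it
assumes GCC's internal headers, and count_bb_insns is a hypothetical
helper, not part of this patch):

	/* Count the insns of BB; yields 0 for an insn-less block such
	   as the entry or exit block instead of walking off a null
	   insn chain.  */
	static int
	count_bb_insns (basic_block bb)
	{
	  rtx insn;
	  int n = 0;

	  FOR_BB_INSNS (bb, insn)
	    n++;
	  return n;
	}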
/* Cycles through _all_ basic blocks, even the fake ones (entry and
#define BB_END(B) (B)->il.rtl->end_
/* Special block numbers [markers] for entry and exit. */
-#define ENTRY_BLOCK (-1)
-#define EXIT_BLOCK (-2)
+#define ENTRY_BLOCK (0)
+#define EXIT_BLOCK (1)
+
+/* The two blocks that are always in the cfg. */
+#define NUM_FIXED_BLOCKS (2)
-/* Special block number not valid for any block. */
-#define INVALID_BLOCK (-3)
#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0)
#define set_block_for_insn(INSN, BB) (BLOCK_FOR_INSN (INSN) = BB)
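The fixed blocks now occupy the first two slots of the block array, so
every "real" block has index >= NUM_FIXED_BLOCKS and loops over real
blocks start there.  A hedged sketch of the invariant (assumes GCC's
internals with basic_block_info initialized as in compact_blocks below;
verify_numbering is a hypothetical helper):

	/* Check that entry/exit sit in slots 0 and 1 and that the
	   remaining blocks are densely numbered after them.  */
	static void
	verify_numbering (void)
	{
	  int i;

	  gcc_assert (BASIC_BLOCK (ENTRY_BLOCK) == ENTRY_BLOCK_PTR);
	  gcc_assert (BASIC_BLOCK (EXIT_BLOCK) == EXIT_BLOCK_PTR);
	  for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
	    gcc_assert (BASIC_BLOCK (i)->index == i);
	}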
int i;
struct trace *traces;
- if (n_basic_blocks <= 1)
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
if (targetm.cannot_modify_jumps_p ())
bitmap candidates;
int max_size;
- if (n_basic_blocks <= 1)
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
if (targetm.cannot_modify_jumps_p ())
int n_crossing_edges;
int max_edges = 2 * last_basic_block;
- if (n_basic_blocks <= 1)
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
crossing_edges = xcalloc (max_edges, sizeof (edge));
cfg_layout_initialize (0);
FOR_EACH_BB (cur_bb)
- if (cur_bb->index >= 0
- && cur_bb->next_bb->index >= 0)
+ if (cur_bb->index >= NUM_FIXED_BLOCKS
+ && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
cur_bb->aux = cur_bb->next_bb;
find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
defs_uses_info info;
sbitmap_vector_zero (bb_gen, n_basic_blocks);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
{
basic_block bb = BASIC_BLOCK (i);
int reg;
/* For each basic block, form the set BB_KILL - the set
of definitions that the block kills. */
sbitmap_vector_zero (bb_kill, n_basic_blocks);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
{
for (regno = first_btr; regno <= last_btr; regno++)
if (TEST_HARD_REG_BIT (all_btrs, regno)
int changed;
sbitmap bb_in = sbitmap_alloc (max_uid);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
sbitmap_copy (bb_out[i], bb_gen[i]);
changed = 1;
while (changed)
{
changed = 0;
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
{
sbitmap_union_of_preds (bb_in, bb_out, i);
changed |= sbitmap_union_of_diff_cg (bb_out[i], bb_gen[i],
/* Link uses to the uses lists of all of their reaching defs.
Count up the number of reaching defs of each use. */
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn;
{
int i;
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
{
basic_block bb = BASIC_BLOCK (i);
fprintf(dump_file,
int i;
basic_block bb;
- i = 0;
- FOR_EACH_BB (bb)
+ BASIC_BLOCK (ENTRY_BLOCK) = ENTRY_BLOCK_PTR;
+ BASIC_BLOCK (EXIT_BLOCK) = EXIT_BLOCK_PTR;
+
+ i = NUM_FIXED_BLOCKS;
+ FOR_EACH_BB (bb)
{
BASIC_BLOCK (i) = bb;
bb->index = i;
basic_block bb;
edge_iterator ei;
- block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */
+ block_count = n_basic_blocks; /* Include the entry and exit blocks. */
num_edges = 0;
int x;
fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
- elist->num_blocks - 2, elist->num_edges);
+	   elist->num_blocks - NUM_FIXED_BLOCKS, elist->num_edges);
for (x = 0; x < elist->num_edges; x++)
{
edge_iterator *stack;
int sp;
int dfsnum = 0;
- int rcnum = n_basic_blocks - 1;
+ int rcnum = n_basic_blocks - 1 - NUM_FIXED_BLOCKS;
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
free (stack);
sbitmap_free (visited);
- /* The number of nodes visited should be the number of blocks. */
- gcc_assert (dfsnum == n_basic_blocks);
+  /* The number of nodes visited should be the number of blocks minus
+     the entry and exit blocks, which are not visited here.  */
+ gcc_assert (dfsnum == n_basic_blocks - NUM_FIXED_BLOCKS);
return dfsnum;
}
flow_dfs_compute_reverse_init (depth_first_search_ds data)
{
/* Allocate stack for back-tracking up CFG. */
- data->stack = xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1))
- * sizeof (basic_block));
+ data->stack = xmalloc (n_basic_blocks * sizeof (basic_block));
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));
+ data->visited_blocks = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (data->visited_blocks);
flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb)
{
data->stack[data->sp++] = bb;
- SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1));
+ SET_BIT (data->visited_blocks, bb->index);
}
/* Continue the depth-first search through the reverse graph starting with the
/* Perform depth-first search on adjacent vertices. */
FOR_EACH_EDGE (e, ei, bb->preds)
- if (!TEST_BIT (data->visited_blocks,
- e->src->index - (INVALID_BLOCK + 1)))
+ if (!TEST_BIT (data->visited_blocks, e->src->index))
flow_dfs_compute_reverse_add_bb (data, e->src);
}
/* Determine if there are unvisited basic blocks. */
FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb)
- if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)))
+ if (!TEST_BIT (data->visited_blocks, bb->index))
return bb;
return NULL;
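Because every block index is now nonnegative, bb->index can address an
sbitmap directly and the old "- (INVALID_BLOCK + 1)" bias disappears
from all the visited-set bookkeeping above.  A small sketch (assumes
GCC's sbitmap API; mark_and_test is a hypothetical helper):

	/* Mark BB visited, returning whether it had been seen already;
	   bb->index is a valid bit number for every block now.  */
	static bool
	mark_and_test (sbitmap visited, basic_block bb)
	{
	  if (TEST_BIT (visited, bb->index))
	    return true;
	  SET_BIT (visited, bb->index);
	  return false;
	}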
static sbitmap visited;
static unsigned v_size;
-#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index + 2))
-#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index + 2))
-#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index + 2))
+#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
+#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index))
+#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
/* Resize the VISITED sbitmap if necessary. */
- size = last_basic_block + 2;
+ size = last_basic_block;
if (size < 10)
size = 10;
static int
count_basic_blocks (rtx f)
{
- int count = 0;
+ int count = NUM_FIXED_BLOCKS;
bool saw_insn = false;
rtx insn;
/* The rest of the compiler works a bit smoother when we don't have to
check for the edge case of do-nothing functions with no basic blocks. */
- if (count == 0)
+ if (count == NUM_FIXED_BLOCKS)
{
emit_insn (gen_rtx_USE (VOIDmode, const0_rtx));
- count = 1;
+ count = NUM_FIXED_BLOCKS + 1;
}
return count;
}
n_basic_blocks = count_basic_blocks (f);
- last_basic_block = 0;
+ last_basic_block = NUM_FIXED_BLOCKS;
ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR;
+
/* Size the basic block table. The actual structures will be allocated
by find_basic_blocks_1, since we want to keep the structure pointers
stable across calls to find_basic_blocks. */
actually lay them out. */
VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info");
+ BASIC_BLOCK (ENTRY_BLOCK) = ENTRY_BLOCK_PTR;
+ BASIC_BLOCK (EXIT_BLOCK) = EXIT_BLOCK_PTR;
find_basic_blocks_1 (f);
}
target = first = e->dest;
- counter = 0;
+ counter = NUM_FIXED_BLOCKS;
/* If we are partitioning hot/cold basic_blocks, we don't want to mess
up jumps that cross between hot/cold sections.
if (t->dest == b)
break;
- gcc_assert (nthreaded_edges < n_basic_blocks);
+ gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
threaded_edges[nthreaded_edges++] = t;
new_target = t->dest;
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
&& (single_succ_edge (b)->flags & EDGE_FALLTHRU)
- && n_basic_blocks > 1)
+ && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
{
if (dump_file)
fprintf (dump_file,
basic_block *last_visited;
timevar_push (TV_CFG_VERIFY);
- last_visited = xcalloc (last_basic_block + 2, sizeof (basic_block));
- edge_checksum = xcalloc (last_basic_block + 2, sizeof (size_t));
+ last_visited = xcalloc (last_basic_block, sizeof (basic_block));
+ edge_checksum = xcalloc (last_basic_block, sizeof (size_t));
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR;
}
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (last_visited [e->dest->index + 2] == bb)
+ if (last_visited [e->dest->index] == bb)
{
error ("verify_flow_info: Duplicate edge %i->%i",
e->src->index, e->dest->index);
err = 1;
}
- last_visited [e->dest->index + 2] = bb;
+ last_visited [e->dest->index] = bb;
if (e->flags & EDGE_FALLTHRU)
n_fallthru++;
err = 1;
}
- edge_checksum[e->dest->index + 2] += (size_t) e;
+ edge_checksum[e->dest->index] += (size_t) e;
}
if (n_fallthru > 1)
{
err = 1;
}
- edge_checksum[e->dest->index + 2] -= (size_t) e;
+ edge_checksum[e->dest->index] -= (size_t) e;
}
}
edge_iterator ei;
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
- edge_checksum[e->dest->index + 2] += (size_t) e;
+ edge_checksum[e->dest->index] += (size_t) e;
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- edge_checksum[e->dest->index + 2] -= (size_t) e;
+ edge_checksum[e->dest->index] -= (size_t) e;
}
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- if (edge_checksum[bb->index + 2])
+ if (edge_checksum[bb->index])
{
error ("basic block %i edge lists are corrupted", bb->index);
err = 1;
/* First do the bulk reordering -- rechain the blocks without regard to
the needed changes to jumps and labels. */
- for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
+ for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
bb != 0;
bb = bb->aux, index++)
{
if (dump_file)
{
fprintf (dump_file, "Reordered sequence:\n");
- for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
+ for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
bb;
bb = bb->aux, index++)
{
prev_bb = ENTRY_BLOCK_PTR;
bb = ENTRY_BLOCK_PTR->next_bb;
- index = 0;
+ index = NUM_FIXED_BLOCKS;
for (; bb; prev_bb = bb, bb = bb->aux, index ++)
{
if (loops->cfg.dfs_order)
{
fputs (";; DFS order: ", file);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
fprintf (file, "%d ", loops->cfg.dfs_order[i]);
fputs ("\n", file);
if (loops->cfg.rc_order)
{
fputs (";; RC order: ", file);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = NUM_FIXED_BLOCKS; i < n_basic_blocks; i++)
fprintf (file, "%d ", loops->cfg.rc_order[i]);
fputs ("\n", file);
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return 0;
dfs_order = NULL;
loops->parray[0]->outer = NULL;
loops->parray[0]->depth = 0;
loops->parray[0]->pred = NULL;
- loops->parray[0]->num_nodes = n_basic_blocks + 2;
+ loops->parray[0]->num_nodes = n_basic_blocks;
loops->parray[0]->latch = EXIT_BLOCK_PTR;
loops->parray[0]->header = ENTRY_BLOCK_PTR;
ENTRY_BLOCK_PTR->loop_father = loops->parray[0];
num_loops = 1;
- for (b = 0; b < n_basic_blocks; b++)
+ for (b = 0; b < n_basic_blocks - NUM_FIXED_BLOCKS; b++)
{
struct loop *loop;
edge_iterator ei;
if (loop->latch == EXIT_BLOCK_PTR)
{
/* There may be blocks unreachable from EXIT_BLOCK. */
- gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks + 2);
+ gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks);
FOR_EACH_BB (bb)
tovisit[tv++] = bb;
tovisit[tv++] = EXIT_BLOCK_PTR;
}
/* Remove the dead loops from structures. */
- loops->tree_root->num_nodes = n_basic_blocks + 2;
+ loops->tree_root->num_nodes = n_basic_blocks;
for (i = 1; i < loops->num; i++)
{
loop = loops->parray[i];
rtx
entry_of_function (void)
{
- return (n_basic_blocks ? BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
+  return (n_basic_blocks > NUM_FIXED_BLOCKS
+	  ? BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
}
/* Update insns block within BB. */
curr_bb = NULL;
}
- if (num_bb_notes != n_basic_blocks)
+ if (num_bb_notes != n_basic_blocks - NUM_FIXED_BLOCKS)
internal_error
("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
num_bb_notes, n_basic_blocks);
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
calls since there is no way that we can determine if they will
return or not... */
- for (i = 0; i < last_bb; i++)
+ for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn;
- reg_live_length is not updated
- reg_n_refs is not adjusted in the rare case when a register is
no longer required in a computation
- - there are extremely rare cases (see distribute_regnotes) when a
+ - there are extremely rare cases (see distribute_notes) when a
REG_DEAD note is lost
- a LOG_LINKS entry that refers to an insn with multiple SETs may be
removed because there is no way to know which register it was
{
int aflags;
int dflags;
- int i;
basic_block bb;
struct dataflow dflow;
df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
df->rc_order = xmalloc (sizeof (int) * n_basic_blocks);
df->rts_order = xmalloc (sizeof (int) * n_basic_blocks);
- df->inverse_dfs_map = xmalloc (sizeof (int) * last_basic_block);
- df->inverse_rc_map = xmalloc (sizeof (int) * last_basic_block);
- df->inverse_rts_map = xmalloc (sizeof (int) * last_basic_block);
flow_depth_first_order_compute (df->dfs_order, df->rc_order);
flow_reverse_top_sort_order_compute (df->rts_order);
- for (i = 0; i < n_basic_blocks; i++)
- {
- df->inverse_dfs_map[df->dfs_order[i]] = i;
- df->inverse_rc_map[df->rc_order[i]] = i;
- df->inverse_rts_map[df->rts_order[i]] = i;
- }
if (aflags & DF_RD)
{
/* Compute the sets of gens and kills for the defs of each bb. */
free (df->dfs_order);
free (df->rc_order);
free (df->rts_order);
- free (df->inverse_rc_map);
- free (df->inverse_dfs_map);
- free (df->inverse_rts_map);
}
DATAFLOW, producing the in and out sets. Only the part of the cfg
induced by blocks in DATAFLOW->order is taken into account.
- For forward problems, you probably want to pass in a mapping of
- block number to rc_order (like df->inverse_rc_map). */
+ For forward problems, you probably want to pass in rc_order. */
void
iterative_dataflow (struct dataflow *dataflow)
sbitmap_zero (visited);
sbitmap_zero (considered);
- for (i = 0; i < dataflow->n_blocks; i++)
+ for (i = 0; i < dataflow->n_blocks - NUM_FIXED_BLOCKS; i++)
{
idx = dataflow->order[i];
SET_BIT (pending, idx);
while (1)
{
- for (i = 0; i < dataflow->n_blocks; i++)
+      for (i = 0; i < dataflow->n_blocks - NUM_FIXED_BLOCKS; i++)
{
idx = dataflow->order[i];
int *dfs_order; /* DFS order -> block number. */
int *rc_order; /* Reverse completion order -> block number. */
int *rts_order; /* Reverse top sort order -> block number. */
- int *inverse_rc_map; /* Block number -> reverse completion order. */
- int *inverse_dfs_map; /* Block number -> DFS order. */
- int *inverse_rts_map; /* Block number -> reverse top-sort order. */
};
'undefined' or 'end of list'. The name of each node is given by the dfs
number of the corresponding basic block.  Note that we include the
artificial ENTRY_BLOCK (or EXIT_BLOCK in the post-dom case) in our lists to
- support multiple entry points. As it has no real basic block index we use
- 'last_basic_block' for that. Its dfs number is of course 1. */
+ support multiple entry points. Its dfs number is of course 1. */
/* Type of Basic Block aka. TBB */
typedef unsigned int TBB;
static void
init_dom_info (struct dom_info *di, enum cdi_direction dir)
{
- /* We need memory for n_basic_blocks nodes and the ENTRY_BLOCK or
- EXIT_BLOCK. */
- unsigned int num = n_basic_blocks + 1 + 1;
+ unsigned int num = n_basic_blocks;
init_ar (di->dfs_parent, TBB, num, 0);
init_ar (di->path_min, TBB, num, i);
init_ar (di->key, TBB, num, i);
/* Ending block. */
basic_block ex_block;
- stack = xmalloc ((n_basic_blocks + 3) * sizeof (edge_iterator));
+ stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge_iterator));
sp = 0;
/* Initialize our border blocks, and the first edge. */
di->nodes = di->dfsnum - 1;
- /* Make sure there is a path from ENTRY to EXIT at all. */
- gcc_assert (di->nodes == (unsigned int) n_basic_blocks + 1);
+ /* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. */
+ gcc_assert (di->nodes == (unsigned int) n_basic_blocks - 1);
}
/* Compress the path from V to the root of its set and update path_min at the
{
b->dom[dir] = et_new_tree (b);
}
- n_bbs_in_dom_tree[dir] = n_basic_blocks + 2;
+ n_bbs_in_dom_tree[dir] = n_basic_blocks;
init_dom_info (&di, dir);
calc_dfs_tree (&di, dir);
SET_REGNO_REG_SET (invalidated_by_call, i);
/* Allocate space for the sets of local properties. */
- local_sets = xcalloc (last_basic_block - (INVALID_BLOCK + 1),
- sizeof (regset));
- cond_local_sets = xcalloc (last_basic_block - (INVALID_BLOCK + 1),
- sizeof (regset));
-
- /* Create a worklist. Allocate an extra slot for ENTRY_BLOCK, and one
- because the `head == tail' style test for an empty queue doesn't
- work with a full queue. */
- queue = xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) * sizeof (*queue));
+ local_sets = xcalloc (last_basic_block, sizeof (regset));
+ cond_local_sets = xcalloc (last_basic_block, sizeof (regset));
+
+  /* Create a worklist.  Allocate an extra slot because the `head == tail'
+     style test for an empty queue doesn't work with a full queue.  */
+ queue = xmalloc ((n_basic_blocks + 1) * sizeof (*queue));
qtail = queue;
- qhead = qend = queue + n_basic_blocks - (INVALID_BLOCK + 1);
+ qhead = qend = queue + n_basic_blocks;
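The extra queue slot exists because a ring buffer that signals "empty"
with head == tail can hold at most capacity - 1 items.  A stand-alone
toy demonstrating the sizing rule (plain C, not GCC code):

	#include <assert.h>

	#define N 4			/* capacity, including the spare slot */
	static int ring[N], head, tail;	/* head == tail <=> empty */

	int
	main (void)
	{
	  int pushed = 0;

	  /* Push until "full", i.e. one slot short of wrapping onto head.  */
	  while ((tail + 1) % N != head)
	    {
	      ring[tail] = pushed++;
	      tail = (tail + 1) % N;
	    }
	  assert (pushed == N - 1);	/* N slots hold N - 1 items */
	  return 0;
	}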
/* Queue the blocks set in the initial mask. Do this in reverse block
number order so that we are more likely for the first round to do
basic block. On subsequent passes, we get to skip out early if
live_at_end wouldn't have changed. */
- if (local_sets[bb->index - (INVALID_BLOCK + 1)] == NULL)
+ if (local_sets[bb->index] == NULL)
{
- local_sets[bb->index - (INVALID_BLOCK + 1)]
- = ALLOC_REG_SET (®_obstack);
- cond_local_sets[bb->index - (INVALID_BLOCK + 1)]
- = ALLOC_REG_SET (®_obstack);
+ local_sets[bb->index] = ALLOC_REG_SET (®_obstack);
+ cond_local_sets[bb->index] = ALLOC_REG_SET (®_obstack);
rescan = 1;
}
else
successor block. We can miss changes in those sets if
we only compare the new live_at_end against the
previous one. */
- cond_local_set = cond_local_sets[bb->index - (INVALID_BLOCK + 1)];
+ cond_local_set = cond_local_sets[bb->index];
rescan = bitmap_intersect_p (new_live_at_end, cond_local_set);
}
/* If any of the changed bits overlap with local_sets[bb],
we'll have to rescan the block. */
- local_set = local_sets[bb->index - (INVALID_BLOCK + 1)];
+ local_set = local_sets[bb->index];
rescan = bitmap_intersect_p (tmp, local_set);
}
}
/* Rescan the block insn by insn to turn (a copy of) live_at_end
into live_at_start. */
propagate_block (bb, new_live_at_end,
- local_sets[bb->index - (INVALID_BLOCK + 1)],
- cond_local_sets[bb->index - (INVALID_BLOCK + 1)],
+ local_sets[bb->index],
+ cond_local_sets[bb->index],
flags);
/* If live_at start didn't change, no need to go farther. */
SET_BIT (blocks_out, pbb->index);
/* Makes sure to really rescan the block. */
- if (local_sets[pbb->index - (INVALID_BLOCK + 1)])
+ if (local_sets[pbb->index])
{
- FREE_REG_SET (local_sets[pbb->index - (INVALID_BLOCK + 1)]);
- FREE_REG_SET (cond_local_sets[pbb->index - (INVALID_BLOCK + 1)]);
- local_sets[pbb->index - (INVALID_BLOCK + 1)] = 0;
+ FREE_REG_SET (local_sets[pbb->index]);
+ FREE_REG_SET (cond_local_sets[pbb->index]);
+ local_sets[pbb->index] = 0;
}
/* Add it to the queue. */
EXECUTE_IF_SET_IN_SBITMAP (blocks_out, 0, i, sbi)
{
basic_block bb = BASIC_BLOCK (i);
- FREE_REG_SET (local_sets[bb->index - (INVALID_BLOCK + 1)]);
- FREE_REG_SET (cond_local_sets[bb->index - (INVALID_BLOCK + 1)]);
+ FREE_REG_SET (local_sets[bb->index]);
+ FREE_REG_SET (cond_local_sets[bb->index]);
};
}
else
{
FOR_EACH_BB (bb)
{
- FREE_REG_SET (local_sets[bb->index - (INVALID_BLOCK + 1)]);
- FREE_REG_SET (cond_local_sets[bb->index - (INVALID_BLOCK + 1)]);
+ FREE_REG_SET (local_sets[bb->index]);
+ FREE_REG_SET (cond_local_sets[bb->index]);
}
}
int
regno_clobbered_at_setjmp (int regno)
{
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return 0;
return ((REG_N_SETS (regno) > 1
fixup_fallthru_exit_predecessor. */
cfg_layout_initialize (0);
FOR_EACH_BB (cur_bb)
- if (cur_bb->index >= 0 && cur_bb->next_bb->index >= 0)
+ if (cur_bb->index >= NUM_FIXED_BLOCKS
+ && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
cur_bb->aux = cur_bb->next_bb;
cfg_layout_finalize ();
}
dump_flow_info (file);
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled")))
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1 || is_too_expensive (_("GCSE disabled")))
return 0;
gcc_obstack_init (&gcse_obstack);
dump_flow_info (file);
/* Return if there's nothing to do, or it is too expensive. */
- if (n_basic_blocks <= 1 || is_too_expensive (_ ("jump bypassing disabled")))
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1 ||
+ is_too_expensive (_ ("jump bypassing disabled")))
return 0;
gcc_obstack_init (&gcse_obstack);
#if 0 /* We need to eliminate regs even if there is no rtl code,
for the sake of debugging information. */
- if (n_basic_blocks > 0)
+ if (n_basic_blocks > NUM_FIXED_BLOCKS)
#endif
{
build_insn_chain (get_insns ());
int i;
int *rts_order;
- rts_order = xmalloc (sizeof (int) * n_basic_blocks);
+ rts_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
flow_reverse_top_sort_order_compute (rts_order);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
BB_INFO_BY_INDEX (rts_order [i])->rts_number = i;
free (rts_order);
}
return FALSE;
/* THEN is not EXIT. */
- if (then_bb->index < 0)
+ if (then_bb->index < NUM_FIXED_BLOCKS)
return FALSE;
/* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX);
if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2)
;
- else if (else_succ->dest->index < 0
+ else if (else_succ->dest->index < NUM_FIXED_BLOCKS
|| dominated_by_p (CDI_POST_DOMINATORS, then_bb,
else_succ->dest))
;
}
qin = worklist;
- qend = &worklist[n_basic_blocks];
- qlen = n_basic_blocks;
+ qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
+ = xmalloc (sizeof (basic_block) * n_basic_blocks);
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
}
/* Note that we do not use the last allocated element for our queue,
- as EXIT_BLOCK is never inserted into it. In fact the above allocation
- of n_basic_blocks + 1 elements is not necessary. */
+ as EXIT_BLOCK is never inserted into it. */
qin = worklist;
- qend = &worklist[n_basic_blocks];
- qlen = n_basic_blocks;
+ qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
/* Iterate until the worklist is empty. */
while (qlen)
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
- qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks);
+  qin = qout = worklist
+    = xmalloc (sizeof (basic_block) * (n_basic_blocks - NUM_FIXED_BLOCKS));
/* We want a maximal solution. */
sbitmap_vector_ones (avout, last_basic_block);
}
qin = worklist;
- qend = &worklist[n_basic_blocks];
- qlen = n_basic_blocks;
+ qend = &worklist[n_basic_blocks - NUM_FIXED_BLOCKS];
+ qlen = n_basic_blocks - NUM_FIXED_BLOCKS;
/* Mark blocks which are successors of the entry block so that we
can easily identify them below. */
conditional has no loop header successors as not taken. */
if (!header_found)
FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest->index < 0
+ if (e->dest->index < NUM_FIXED_BLOCKS
|| !flow_bb_inside_loop_p (loop, e->dest))
predict_edge
(e, PRED_LOOP_EXIT,
int *heads;
heads = xmalloc (sizeof (int) * last_basic_block);
- memset (heads, -1, sizeof (int) * last_basic_block);
+ memset (heads, ENTRY_BLOCK, sizeof (int) * last_basic_block);
heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block;
apply_return_prediction (heads);
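The memset stays correct only because the sentinel changed along with
the numbering: byte value 0 makes every int equal ENTRY_BLOCK (now 0),
just as byte value -1 formerly produced the old -1 sentinel on
two's-complement targets.  A stand-alone check (illustrative, not from
the patch):

	#include <assert.h>
	#include <string.h>

	#define ENTRY_BLOCK 0

	int
	main (void)
	{
	  int heads[8];

	  /* Filling with byte 0 makes every int equal ENTRY_BLOCK.  */
	  memset (heads, ENTRY_BLOCK, sizeof heads);
	  assert (heads[3] == ENTRY_BLOCK);
	  return 0;
	}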
edge_iterator ei;
int y;
- if (heads[bb->index] < 0)
+ if (heads[bb->index] == ENTRY_BLOCK)
{
/* This is first time we need this field in heads array; so
find first dominator that we do not post-dominate (we are
basic_block next_ai = get_immediate_dominator (CDI_DOMINATORS, bb);
int head;
- while (heads[next_ai->index] < 0)
+ while (heads[next_ai->index] == ENTRY_BLOCK)
{
if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb))
break;
while (next_ai != bb)
{
next_ai = ai;
- if (heads[ai->index] == ENTRY_BLOCK)
- ai = ENTRY_BLOCK_PTR;
- else
- ai = BASIC_BLOCK (heads[ai->index]);
+ ai = BASIC_BLOCK (heads[ai->index]);
heads[next_ai->index] = head;
}
}
if (y == last_basic_block)
return;
FOR_EACH_EDGE (e, ei, BASIC_BLOCK (y)->succs)
- if (e->dest->index >= 0
+ if (e->dest->index >= NUM_FIXED_BLOCKS
&& dominated_by_p (CDI_POST_DOMINATORS, e->dest, bb))
predict_edge_def (e, pred, taken);
}
/* The outermost "loop" includes the exit block, which we can not
look up via BASIC_BLOCK. Detect this and use EXIT_BLOCK_PTR
directly. Do the same for the entry block. */
- if (i == (unsigned)ENTRY_BLOCK)
- bb = ENTRY_BLOCK_PTR;
- else if (i == (unsigned)EXIT_BLOCK)
- bb = EXIT_BLOCK_PTR;
- else
- bb = BASIC_BLOCK (i);
+ bb = BASIC_BLOCK (i);
FOR_EACH_EDGE (e, ei, bb->preds)
{
{
FOR_EACH_EDGE (e, ei, bb->succs)
e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count;
- if (bb->index >= 0
+ if (bb->index >= NUM_FIXED_BLOCKS
&& block_ends_with_condjump_p (bb)
&& EDGE_COUNT (bb->succs) >= 2)
{
FOR_EACH_EDGE (e, ei, bb->succs)
e->probability = REG_BR_PROB_BASE / total;
}
- if (bb->index >= 0
+ if (bb->index >= NUM_FIXED_BLOCKS
&& block_ends_with_condjump_p (bb)
&& EDGE_COUNT (bb->succs) >= 2)
	      num_branches++;
	      num_never_executed++;
free (histogram_counts[t]);
}
-#define BB_TO_GCOV_INDEX(bb) ((bb)->index + 1)
+/* The entry basic block will be moved around so that it has index 1;
+   there is nothing at index 0 and the exit block is at last_basic_block.  */
+#define BB_TO_GCOV_INDEX(bb) ((bb)->index - 1)
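A worked check of the mapping, assuming last_basic_block == 6 with real
blocks at indices 2..5 and the temporary reindexing done in branch_prob
below: the entry block (index 1) maps to gcov index 0, real blocks to
1..4, and the exit block to 5, so gcov sees a dense nonnegative range.
Stand-alone sketch (the macro is restated over plain ints purely for
illustration):

	#include <assert.h>

	#define BB_TO_GCOV_INDEX(index) ((index) - 1)

	int
	main (void)
	{
	  int last_basic_block = 6;	/* assumed example value */

	  assert (BB_TO_GCOV_INDEX (1) == 0);			/* entry */
	  assert (BB_TO_GCOV_INDEX (2) == 1);			/* first real bb */
	  assert (BB_TO_GCOV_INDEX (last_basic_block) == 5);	/* exit */
	  return 0;
	}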
/* When passed NULL as file_name, initialize.
When passed something else, output the necessary commands to change
line to LINE and offset to FILE_NAME. */
num_instrumented++;
}
- total_num_blocks += n_basic_blocks + 2;
+ total_num_blocks += n_basic_blocks;
if (dump_file)
fprintf (dump_file, "%d basic blocks\n", n_basic_blocks);
gcov_position_t offset;
offset = gcov_write_tag (GCOV_TAG_BLOCKS);
- for (i = 0; i != (unsigned) (n_basic_blocks + 2); i++)
+  for (i = 0; i != (unsigned) n_basic_blocks; i++)
gcov_write_unsigned (0);
gcov_write_length (offset);
}
/* Keep all basic block indexes nonnegative in the gcov output.
Index 0 is used for entry block, last index is for exit block.
*/
- ENTRY_BLOCK_PTR->index = -1;
+ ENTRY_BLOCK_PTR->index = 1;
EXIT_BLOCK_PTR->index = last_basic_block;
/* Arcs */
all_vd = xmalloc (sizeof (struct value_data) * last_basic_block);
- visited = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));
+ visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (visited);
FOR_EACH_BB (bb)
{
- SET_BIT (visited, bb->index - (INVALID_BLOCK + 1));
+ SET_BIT (visited, bb->index);
/* If a block has a single predecessor, that we've already
processed, begin with the value data that was live at
the end of the predecessor block. */
/* ??? Ought to use more intelligent queuing of blocks. */
- if (single_pred_p (bb)
- && TEST_BIT (visited,
- single_pred (bb)->index - (INVALID_BLOCK + 1))
+ if (single_pred_p (bb)
+ && TEST_BIT (visited, single_pred (bb)->index)
&& ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
all_vd[bb->index] = all_vd[single_pred (bb)->index];
else
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return;
/* Find the highest INSN_UID and allocate and initialize our map from
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return;
sched_init (dump_file);
sp->is_speculative = 0;
sp->src_prob = 100;
- visited = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));
+ visited = sbitmap_alloc (last_basic_block);
for (i = trg + 1; i < current_nr_blocks; i++)
{
block = el.first_member[j]->src;
FOR_EACH_EDGE (e, ei, block->succs)
{
- if (!TEST_BIT (visited,
- e->dest->index - (INVALID_BLOCK + 1)))
+ if (!TEST_BIT (visited, e->dest->index))
{
for (k = 0; k < el.nr_members; k++)
if (e == el.first_member[k])
if (k >= el.nr_members)
{
bblst_table[bblst_last++] = e->dest;
- SET_BIT (visited,
- e->dest->index - (INVALID_BLOCK + 1));
+ SET_BIT (visited, e->dest->index);
update_idx++;
}
}
/* Compute regions for scheduling. */
if (reload_completed
- || n_basic_blocks == 1
+ || n_basic_blocks == NUM_FIXED_BLOCKS + 1
|| !flag_schedule_interblock
|| is_cfg_nonregular ())
{
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return;
nr_inter = 0;
nr_spec = 0;
-
sched_init (dump_file);
init_regions ();
current_sched_info = ®ion_sched_info;
-
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
schedule_region (rgn);
static bool
ignore_bb_p (basic_block bb)
{
- if (bb->index < 0)
+ if (bb->index < NUM_FIXED_BLOCKS)
return true;
if (!maybe_hot_bb_p (bb))
return true;
void
tracer (unsigned int flags)
{
- if (n_basic_blocks <= 1)
+ if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
return;
cfg_layout_initialize (flags);
/* Initialize the basic block array. */
init_flow ();
profile_status = PROFILE_ABSENT;
- n_basic_blocks = 0;
- last_basic_block = 0;
+ n_basic_blocks = NUM_FIXED_BLOCKS;
+ last_basic_block = NUM_FIXED_BLOCKS;
VARRAY_BB_INIT (basic_block_info, initial_cfg_capacity, "basic_block_info");
/* Build a mapping of labels to their associated blocks. */
VARRAY_BB_INIT (label_to_block_map, initial_cfg_capacity,
"label to block map");
+ BASIC_BLOCK (ENTRY_BLOCK) = ENTRY_BLOCK_PTR;
+ BASIC_BLOCK (EXIT_BLOCK) = EXIT_BLOCK_PTR;
ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR;
}
factor_computed_gotos ();
/* Make sure there is always at least one block, even if it's empty. */
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
create_empty_bb (ENTRY_BLOCK_PTR);
/* Adjust the size of the array. */
/* Create an edge from entry to the first block with executable
statements in it. */
- make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (0), EDGE_FALLTHRU);
+ make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (NUM_FIXED_BLOCKS), EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
FOR_EACH_BB (bb)
and undefined variable warnings quite right. */
if ((errorcount || sorrycount) && uid < 0)
{
- block_stmt_iterator bsi = bsi_start (BASIC_BLOCK (0));
+	  block_stmt_iterator bsi
+	    = bsi_start (BASIC_BLOCK (NUM_FIXED_BLOCKS));
tree stmt;
stmt = build1 (LABEL_EXPR, void_type_node, dest);
{
basic_block bb;
- bb = BASIC_BLOCK (0);
+ bb = BASIC_BLOCK (NUM_FIXED_BLOCKS);
if (bb && bb->loop_father)
print_loop (file, bb->loop_father, 0);
}
int last_bb = last_basic_block;
bool check_last_block = false;
- if (n_basic_blocks == 0)
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
return 0;
if (! blocks)
memset ((void *)dfa_stats_p, 0, sizeof (struct dfa_stats_d));
/* Walk all the trees in the function counting references. Start at
- basic block 0, but don't stop at block boundaries. */
+ basic block NUM_FIXED_BLOCKS, but don't stop at block boundaries. */
pset = pointer_set_create ();
- for (i = bsi_start (BASIC_BLOCK (0)); !bsi_end_p (i); bsi_next (&i))
+ for (i = bsi_start (BASIC_BLOCK (NUM_FIXED_BLOCKS));
+ !bsi_end_p (i); bsi_next (&i))
walk_tree (bsi_stmt_ptr (i), collect_dfa_stats_r, (void *) dfa_stats_p,
pset);
bsi.tsi = tsi_start (bb->stmt_list);
else
{
- gcc_assert (bb->index < 0);
+ gcc_assert (bb->index < NUM_FIXED_BLOCKS);
bsi.tsi.ptr = NULL;
bsi.tsi.container = NULL;
}
if (!bb->stmt_list)
{
- gcc_assert (bb->index < 0);
+ gcc_assert (bb->index < NUM_FIXED_BLOCKS);
bsi.tsi.ptr = NULL;
bsi.tsi.container = NULL;
return bsi;
bsi.tsi = tsi_last (bb->stmt_list);
else
{
- gcc_assert (bb->index < 0);
+ gcc_assert (bb->index < NUM_FIXED_BLOCKS);
bsi.tsi.ptr = NULL;
bsi.tsi.container = NULL;
}
outer ones, and also that we do not try to visit a removed
block. */
bb_order = blocks_in_phiopt_order ();
- n = n_basic_blocks;
+ n = n_basic_blocks - NUM_FIXED_BLOCKS;
- for (i = 0; i < n; i++)
+ for (i = 0; i < n; i++)
{
tree cond_expr;
tree phi;
{
basic_block x, y;
basic_block *order = xmalloc (sizeof (basic_block) * n_basic_blocks);
- unsigned n = n_basic_blocks, np, i;
- sbitmap visited = sbitmap_alloc (last_basic_block + 2);
+ unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS;
+ unsigned np, i;
+ sbitmap visited = sbitmap_alloc (last_basic_block);
-#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index + 2))
-#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index + 2))
+#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
+#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
sbitmap_zero (visited);
return vr1->op == vr2->op;
}
-
/* Given an expression E, return the rank of the expression. */
static unsigned int
}
/* Set up rank for each BB */
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
bb_rank[bbs[i]] = ++rank << 16;
free (bbs);
/* Compute reverse completion order of depth first search of the CFG
so that the data-flow runs faster. */
- rc_order = xmalloc (n_basic_blocks * sizeof (int));
+ rc_order = xmalloc ((n_basic_blocks - NUM_FIXED_BLOCKS) * sizeof (int));
bb_order = xmalloc (last_basic_block * sizeof (int));
flow_depth_first_order_compute (NULL, rc_order);
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; i++)
bb_order[rc_order[i]] = i;
free (rc_order);
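The bb_order computation above simply inverts the rc_order permutation
over the real blocks: rc_order maps a position to a block index, and
bb_order maps the block index back to its position.  A toy illustration
(plain C, hypothetical values):

	#include <assert.h>

	int
	main (void)
	{
	  int rc_order[] = { 4, 2, 3 };	/* position -> block index */
	  int bb_order[5];		/* block index -> position */
	  int i;

	  for (i = 0; i < 3; i++)
	    bb_order[rc_order[i]] = i;
	  assert (bb_order[4] == 0 && bb_order[2] == 1 && bb_order[3] == 2);
	  return 0;
	}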