used in, so that you only have one place you can sink it to. */
static bool
-all_immediate_uses_same_place (gimple stmt)
+all_immediate_uses_same_place (def_operand_p def_p)
{
- gimple firstuse = NULL;
- ssa_op_iter op_iter;
+ tree var = DEF_FROM_PTR (def_p);
imm_use_iterator imm_iter;
use_operand_p use_p;
- tree var;
- FOR_EACH_SSA_TREE_OPERAND (var, stmt, op_iter, SSA_OP_ALL_DEFS)
+ gimple firstuse = NULL;
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
{
- FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
- {
- if (is_gimple_debug (USE_STMT (use_p)))
- continue;
- if (firstuse == NULL)
- firstuse = USE_STMT (use_p);
- else
- if (firstuse != USE_STMT (use_p))
- return false;
- }
+ if (is_gimple_debug (USE_STMT (use_p)))
+ continue;
+ if (firstuse == NULL)
+ firstuse = USE_STMT (use_p);
+ else
+ if (firstuse != USE_STMT (use_p))
+ return false;
}
return true;
/* Find the nearest common dominator of all of the immediate uses in IMM. */
static basic_block
-nearest_common_dominator_of_uses (gimple stmt, bool *debug_stmts)
+nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts)
{
+ tree var = DEF_FROM_PTR (def_p);
bitmap blocks = BITMAP_ALLOC (NULL);
basic_block commondom;
unsigned int j;
bitmap_iterator bi;
- ssa_op_iter op_iter;
imm_use_iterator imm_iter;
use_operand_p use_p;
- tree var;
- bitmap_clear (blocks);
- FOR_EACH_SSA_TREE_OPERAND (var, stmt, op_iter, SSA_OP_ALL_DEFS)
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
{
- FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
- {
- gimple usestmt = USE_STMT (use_p);
- basic_block useblock;
+ gimple usestmt = USE_STMT (use_p);
+ basic_block useblock;
- if (gimple_code (usestmt) == GIMPLE_PHI)
- {
- int idx = PHI_ARG_INDEX_FROM_USE (use_p);
+ if (gimple_code (usestmt) == GIMPLE_PHI)
+ {
+ int idx = PHI_ARG_INDEX_FROM_USE (use_p);
- useblock = gimple_phi_arg_edge (usestmt, idx)->src;
- }
- else if (is_gimple_debug (usestmt))
- {
- *debug_stmts = true;
- continue;
- }
- else
- {
- useblock = gimple_bb (usestmt);
- }
+ useblock = gimple_phi_arg_edge (usestmt, idx)->src;
+ }
+ else if (is_gimple_debug (usestmt))
+ {
+ *debug_stmts = true;
+ continue;
+ }
+ else
+ {
+ useblock = gimple_bb (usestmt);
+ }
- /* Short circuit. Nothing dominates the entry block. */
- if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
- {
- BITMAP_FREE (blocks);
- return NULL;
- }
- bitmap_set_bit (blocks, useblock->index);
+ /* Short circuit. Nothing dominates the entry block. */
+ if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ {
+ BITMAP_FREE (blocks);
+ return NULL;
}
+ bitmap_set_bit (blocks, useblock->index);
}
commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
be seen by an external routine that needs it depending on where it gets
moved to.
- We don't want to sink loads from memory.
-
We can't sink statements that end basic blocks without splitting the
incoming edge for the sink location to place it there.
if (stmt_ends_bb_p (stmt)
|| gimple_has_side_effects (stmt)
|| gimple_has_volatile_ops (stmt)
- || (gimple_vuse (stmt) && !gimple_vdef (stmt))
|| (cfun->has_local_explicit_reg_vars
&& TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))) == BLKmode))
return false;
/* If stmt is a store the one and only use needs to be the VOP
merging PHI node. */
- if (gimple_vdef (stmt))
+ if (virtual_operand_p (DEF_FROM_PTR (def_p)))
{
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
{
common dominator of all the immediate uses. For PHI nodes, we have to
find the nearest common dominator of all of the predecessor blocks, since
that is where insertion would have to take place. */
- else if (!all_immediate_uses_same_place (stmt))
+ else if (gimple_vuse (stmt)
+ || !all_immediate_uses_same_place (def_p))
{
bool debug_stmts = false;
- basic_block commondom = nearest_common_dominator_of_uses (stmt,
+ basic_block commondom = nearest_common_dominator_of_uses (def_p,
&debug_stmts);
if (commondom == frombb)
return false;
+      /* If this is a load then do not sink past any stores.
+	 ??? This is overly simple but cheap.  We basically look
+	 for an existing load with the same VUSE in the path to one
+	 of the sink candidate blocks and we adjust commondom to
+	 the block of that load nearest to commondom.  */
+ if (gimple_vuse (stmt))
+ {
+ imm_use_iterator imm_iter;
+ use_operand_p use_p;
+ basic_block found = NULL;
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vuse (stmt))
+ {
+ gimple use_stmt = USE_STMT (use_p);
+ basic_block bb = gimple_bb (use_stmt);
+	      /* For PHI nodes the block we know something about
+		 is the incoming block with the use.  */
+ if (gimple_code (use_stmt) == GIMPLE_PHI)
+ bb = EDGE_PRED (bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;
+ /* Any dominator of commondom would be ok with
+ adjusting commondom to that block. */
+ bb = nearest_common_dominator (CDI_DOMINATORS, bb, commondom);
+ if (!found)
+ found = bb;
+ else if (dominated_by_p (CDI_DOMINATORS, bb, found))
+ found = bb;
+ /* If we can't improve, stop. */
+ if (found == commondom)
+ break;
+ }
+ commondom = found;
+ if (commondom == frombb)
+ return false;
+ }
+
/* Our common dominator has to be dominated by frombb in order to be a
trivially safe place to put this statement, since it has multiple
uses. */