/* Gimple IR support functions.
- Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+ Copyright 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
#include "hard-reg-set.h"
#include "basic-block.h"
#include "gimple.h"
-#include "toplev.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "value-prof.h"
htab_t gimple_canonical_types;
static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map)))
htab_t type_hash_cache;
-
-/* Global type comparison cache. This is by TYPE_UID for space efficiency
- and thus cannot use and does not need GC. */
-static htab_t gtc_visited;
-static struct obstack gtc_ob;
+static GTY((if_marked ("tree_int_map_marked_p"), param_is (struct tree_int_map)))
+ htab_t canonical_type_hash_cache;
/* All the tuples have their operand vector (if present) at the very bottom
of the structure. Therefore, the offset required to find the
"assignments",
"phi nodes",
"conditionals",
- "sequences",
"everything else"
};
#endif /* GATHER_STATISTICS */
-/* A cache of gimple_seq objects. Sequences are created and destroyed
- fairly often during gimplification. */
-static GTY ((deletable)) struct gimple_seq_d *gimple_seq_cache;
-
/* Private API manipulation functions shared only with some
other files. */
extern void gimple_set_stored_syms (gimple, bitmap, bitmap_obstack *);
/* Do not call gimple_set_modified here as it has other side
effects and this tuple is still not completely built. */
stmt->gsbase.modified = 1;
+ gimple_init_singleton (stmt);
return stmt;
}
pt_solution_reset (gimple_call_clobber_set (s));
}
-/* Helper for gimple_build_call, gimple_build_call_vec and
- gimple_build_call_from_tree. Build the basic components of a
- GIMPLE_CALL statement to function FN with NARGS arguments. */
+/* Helper for gimple_build_call, gimple_build_call_valist,
+ gimple_build_call_vec and gimple_build_call_from_tree. Build the basic
+ components of a GIMPLE_CALL statement to function FN with NARGS
+ arguments. */
static inline gimple
gimple_build_call_1 (tree fn, unsigned nargs)
if (TREE_CODE (fn) == FUNCTION_DECL)
fn = build_fold_addr_expr (fn);
gimple_set_op (s, 1, fn);
+ gimple_call_set_fntype (s, TREE_TYPE (TREE_TYPE (fn)));
gimple_call_reset_alias_info (s);
return s;
}
}
+/* Build a GIMPLE_CALL statement to function FN. NARGS is the number of
+ arguments. AP contains the arguments. */
+
+gimple
+gimple_build_call_valist (tree fn, unsigned nargs, va_list ap)
+{
+ gimple call;
+ unsigned i;
+
+ gcc_assert (TREE_CODE (fn) == FUNCTION_DECL || is_gimple_call_addr (fn));
+
+ call = gimple_build_call_1 (fn, nargs);
+
+ for (i = 0; i < nargs; i++)
+ gimple_call_set_arg (call, i, va_arg (ap, tree));
+
+ return call;
+}
+
+
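For illustration only, not part of this patch: a minimal sketch of the kind of variadic wrapper gimple_build_call_valist is meant to support. The helper name build_call_for_args is made up for this example; it simply forwards its trailing arguments through a va_list.

  /* Hypothetical wrapper: build FN (arg1, ..., argN) from varargs by
     forwarding them to gimple_build_call_valist.  */
  static gimple
  build_call_for_args (tree fn, unsigned nargs, ...)
  {
    va_list ap;
    gimple call;

    va_start (ap, nargs);
    call = gimple_build_call_valist (fn, nargs, ap);
    va_end (ap);
    return call;
  }
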
+/* Helper for gimple_build_call_internal and gimple_build_call_internal_vec.
+ Build the basic components of a GIMPLE_CALL statement to internal
+ function FN with NARGS arguments. */
+
+static inline gimple
+gimple_build_call_internal_1 (enum internal_fn fn, unsigned nargs)
+{
+ gimple s = gimple_build_with_ops (GIMPLE_CALL, ERROR_MARK, nargs + 3);
+ s->gsbase.subcode |= GF_CALL_INTERNAL;
+ gimple_call_set_internal_fn (s, fn);
+ gimple_call_reset_alias_info (s);
+ return s;
+}
+
+
+/* Build a GIMPLE_CALL statement to internal function FN. NARGS is
+ the number of arguments. The ... are the arguments. */
+
+gimple
+gimple_build_call_internal (enum internal_fn fn, unsigned nargs, ...)
+{
+ va_list ap;
+ gimple call;
+ unsigned i;
+
+ call = gimple_build_call_internal_1 (fn, nargs);
+ va_start (ap, nargs);
+ for (i = 0; i < nargs; i++)
+ gimple_call_set_arg (call, i, va_arg (ap, tree));
+ va_end (ap);
+
+ return call;
+}
+
+
+/* Build a GIMPLE_CALL statement to internal function FN with the arguments
+ specified in vector ARGS. */
+
+gimple
+gimple_build_call_internal_vec (enum internal_fn fn, VEC(tree, heap) *args)
+{
+ unsigned i, nargs;
+ gimple call;
+
+ nargs = VEC_length (tree, args);
+ call = gimple_build_call_internal_1 (fn, nargs);
+ for (i = 0; i < nargs; i++)
+ gimple_call_set_arg (call, i, VEC_index (tree, args, i));
+
+ return call;
+}
+
+
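For illustration only, not part of this patch: a sketch of building an internal-function call from a vector of arguments. IFN, ARG0, ARG1 and LHS are placeholders assumed to be provided by the surrounding code.

  /* Build the internal call IFN (ARG0, ARG1) and assign its result to LHS.  */
  VEC(tree, heap) *args = VEC_alloc (tree, heap, 2);
  gimple call;

  VEC_quick_push (tree, args, arg0);
  VEC_quick_push (tree, args, arg1);
  call = gimple_build_call_internal_vec (ifn, args);
  gimple_call_set_lhs (call, lhs);
  VEC_free (tree, heap, args);
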
/* Build a GIMPLE_CALL statement from CALL_EXPR T. Note that T is
assumed to be in GIMPLE form already. Minimal checking is done of
this fact. */
/* Carry all the CALL_EXPR flags to the new GIMPLE_CALL. */
gimple_call_set_chain (call, CALL_EXPR_STATIC_CHAIN (t));
gimple_call_set_tail (call, CALL_EXPR_TAILCALL (t));
- gimple_call_set_cannot_inline (call, CALL_CANNOT_INLINE_P (t));
gimple_call_set_return_slot_opt (call, CALL_EXPR_RETURN_SLOT_OPT (t));
- gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t));
+ if (fndecl
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
+ || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN))
+ gimple_call_set_alloca_for_var (call, CALL_ALLOCA_FOR_VAR_P (t));
+ else
+ gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t));
gimple_call_set_va_arg_pack (call, CALL_EXPR_VA_ARG_PACK (t));
gimple_call_set_nothrow (call, TREE_NOTHROW (t));
gimple_set_no_warning (call, TREE_NO_WARNING (t));
return p;
}
+/* Build a GIMPLE_EH_ELSE statement. */
+
+gimple
+gimple_build_eh_else (gimple_seq n_body, gimple_seq e_body)
+{
+ gimple p = gimple_alloc (GIMPLE_EH_ELSE, 0);
+ gimple_eh_else_set_n_body (p, n_body);
+ gimple_eh_else_set_e_body (p, e_body);
+ return p;
+}
+
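For illustration only, not part of this patch: a GIMPLE_EH_ELSE is intended to sit as the sole statement in the cleanup of a GIMPLE_TRY_FINALLY, so that its first sequence runs on the fall-through path and its second only when unwinding. A hedged sketch, assuming BODY, NORMAL_SEQ and EH_SEQ are existing sequences:

  /* Wrap BODY in a try/finally whose cleanup distinguishes the normal
     and exceptional exit paths via an EH_ELSE statement.  */
  gimple eh_else = gimple_build_eh_else (normal_seq, eh_seq);
  gimple_seq cleanup = NULL;
  gimple try_stmt;

  gimple_seq_add_stmt (&cleanup, eh_else);
  try_stmt = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
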
/* Build a GIMPLE_TRY statement.
EVAL is the expression to evaluate.
}
+/* Build a new GIMPLE_DEBUG_SOURCE_BIND statement.
+
+ VAR is bound to VALUE; block and location are taken from STMT. */
+
+gimple
+gimple_build_debug_source_bind_stat (tree var, tree value,
+ gimple stmt MEM_STAT_DECL)
+{
+ gimple p = gimple_build_with_ops_stat (GIMPLE_DEBUG,
+ (unsigned)GIMPLE_DEBUG_SOURCE_BIND, 2
+ PASS_MEM_STAT);
+
+ gimple_debug_source_bind_set_var (p, var);
+ gimple_debug_source_bind_set_value (p, value);
+ if (stmt)
+ {
+ gimple_set_block (p, gimple_block (stmt));
+ gimple_set_location (p, gimple_location (stmt));
+ }
+
+ return p;
+}
+
+
/* Build a GIMPLE_OMP_CRITICAL statement.
BODY is the sequence of statements for which only one thread can execute.
return p;
}
+/* Build a GIMPLE_TRANSACTION statement. */
+
+gimple
+gimple_build_transaction (gimple_seq body, tree label)
+{
+ gimple p = gimple_alloc (GIMPLE_TRANSACTION, 0);
+ gimple_transaction_set_body (p, body);
+ gimple_transaction_set_label (p, label);
+ return p;
+}
+
/* Build a GIMPLE_PREDICT statement. PREDICT is one of the predictors from
predict.def, OUTCOME is NOT_TAKEN or TAKEN. */
#endif /* ENABLE_GIMPLE_CHECKING */
-/* Allocate a new GIMPLE sequence in GC memory and return it. If
- there are free sequences in GIMPLE_SEQ_CACHE return one of those
- instead. */
-
-gimple_seq
-gimple_seq_alloc (void)
-{
- gimple_seq seq = gimple_seq_cache;
- if (seq)
- {
- gimple_seq_cache = gimple_seq_cache->next_free;
- gcc_assert (gimple_seq_cache != seq);
- memset (seq, 0, sizeof (*seq));
- }
- else
- {
- seq = ggc_alloc_cleared_gimple_seq_d ();
-#ifdef GATHER_STATISTICS
- gimple_alloc_counts[(int) gimple_alloc_kind_seq]++;
- gimple_alloc_sizes[(int) gimple_alloc_kind_seq] += sizeof (*seq);
-#endif
- }
-
- return seq;
-}
-
-/* Return SEQ to the free pool of GIMPLE sequences. */
-
-void
-gimple_seq_free (gimple_seq seq)
-{
- if (seq == NULL)
- return;
-
- gcc_assert (gimple_seq_first (seq) == NULL);
- gcc_assert (gimple_seq_last (seq) == NULL);
-
- /* If this triggers, it's a sign that the same list is being freed
- twice. */
- gcc_assert (seq != gimple_seq_cache || gimple_seq_cache == NULL);
-
- /* Add SEQ to the pool of free sequences. */
- seq->next_free = gimple_seq_cache;
- gimple_seq_cache = seq;
-}
-
-
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. */
gimple_seq_add_stmt (gimple_seq *seq_p, gimple gs)
{
gimple_stmt_iterator si;
-
if (gs == NULL)
return;
- if (*seq_p == NULL)
- *seq_p = gimple_seq_alloc ();
-
si = gsi_last (*seq_p);
gsi_insert_after (&si, gs, GSI_NEW_STMT);
}
gimple_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
gimple_stmt_iterator si;
-
if (src == NULL)
return;
- if (*dst_p == NULL)
- *dst_p = gimple_seq_alloc ();
-
si = gsi_last (*dst_p);
gsi_insert_seq_after (&si, src, GSI_NEW_STMT);
}
gimple_seq_copy (gimple_seq src)
{
gimple_stmt_iterator gsi;
- gimple_seq new_seq = gimple_seq_alloc ();
+ gimple_seq new_seq = NULL;
gimple stmt;
for (gsi = gsi_start (src); !gsi_end_p (gsi); gsi_next (&gsi))
}
-/* Walk all the statements in the sequence SEQ calling walk_gimple_stmt
+/* Walk all the statements in the sequence *PSEQ calling walk_gimple_stmt
on each one. WI is as in walk_gimple_stmt.
- If walk_gimple_stmt returns non-NULL, the walk is stopped, the
- value is stored in WI->CALLBACK_RESULT and the statement that
- produced the value is returned.
+ If walk_gimple_stmt returns non-NULL, the walk is stopped, and the
+ value is stored in WI->CALLBACK_RESULT. Also, the statement that
+ produced the value is returned if this statement has not been
+ removed by a callback (wi->removed_stmt). If the statement has
+ been removed, NULL is returned.
Otherwise, all the statements are walked and NULL returned. */
gimple
-walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
- walk_tree_fn callback_op, struct walk_stmt_info *wi)
+walk_gimple_seq_mod (gimple_seq *pseq, walk_stmt_fn callback_stmt,
+ walk_tree_fn callback_op, struct walk_stmt_info *wi)
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start (*pseq); !gsi_end_p (gsi); )
{
tree ret = walk_gimple_stmt (&gsi, callback_stmt, callback_op, wi);
if (ret)
to hold it. */
gcc_assert (wi);
wi->callback_result = ret;
- return gsi_stmt (gsi);
+
+ return wi->removed_stmt ? NULL : gsi_stmt (gsi);
}
+
+ if (!wi->removed_stmt)
+ gsi_next (&gsi);
}
if (wi)
}
+/* Like walk_gimple_seq_mod, but ensure that the head of SEQ isn't
+ changed by the callbacks. */
+
+gimple
+walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
+ walk_tree_fn callback_op, struct walk_stmt_info *wi)
+{
+ gimple_seq seq2 = seq;
+ gimple ret = walk_gimple_seq_mod (&seq2, callback_stmt, callback_op, wi);
+ gcc_assert (seq2 == seq);
+ return ret;
+}
+
+
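For illustration only, not part of this patch: a sketch of the non-modifying wrapper in use, counting call statements in a sequence. The callback name count_calls_stmt and the counter are illustrative only.

  /* Statement callback: bump the counter for each GIMPLE_CALL and skip
     walking the operands.  */
  static tree
  count_calls_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		    struct walk_stmt_info *wi)
  {
    if (is_gimple_call (gsi_stmt (*gsi)))
      ++*(unsigned *) wi->info;
    *handled_ops_p = true;
    return NULL_TREE;
  }

  /* Caller side.  */
  unsigned ncalls = 0;
  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  wi.info = &ncalls;
  walk_gimple_seq (seq, count_calls_stmt, NULL, &wi);
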
/* Helper function for walk_gimple_stmt. Walk operands of a GIMPLE_ASM. */
static tree
tree lhs = gimple_assign_lhs (stmt);
wi->val_only
= (is_gimple_reg_type (TREE_TYPE (lhs)) && !is_gimple_reg (lhs))
- || !gimple_assign_single_p (stmt);
+ || gimple_assign_rhs_class (stmt) != GIMPLE_SINGLE_RHS;
}
for (i = 1; i < gimple_num_ops (stmt); i++)
may use a COMPONENT_REF on the LHS. */
if (wi)
{
- /* If the RHS has more than 1 operand, it is not appropriate
- for the memory. */
- wi->val_only = !is_gimple_mem_rhs (gimple_assign_rhs1 (stmt))
- || !gimple_assign_single_p (stmt);
+ /* If the RHS is of a non-renamable type or is a register variable,
+ we may use a COMPONENT_REF on the LHS. */
+ tree rhs1 = gimple_assign_rhs1 (stmt);
+ wi->val_only
+ = (is_gimple_reg_type (TREE_TYPE (rhs1)) && !is_gimple_reg (rhs1))
+ || gimple_assign_rhs_class (stmt) != GIMPLE_SINGLE_RHS;
wi->is_lhs = true;
}
for (i = 0; i < gimple_call_num_args (stmt); i++)
{
if (wi)
- wi->val_only = is_gimple_reg_type (gimple_call_arg (stmt, i));
+ wi->val_only
+ = is_gimple_reg_type (TREE_TYPE (gimple_call_arg (stmt, i)));
ret = walk_tree (gimple_call_arg_ptr (stmt, i), callback_op, wi,
pset);
if (ret)
if (wi)
{
wi->is_lhs = true;
- wi->val_only = is_gimple_reg_type (gimple_call_lhs (stmt));
+ wi->val_only
+ = is_gimple_reg_type (TREE_TYPE (gimple_call_lhs (stmt)));
}
ret = walk_tree (gimple_call_lhs_ptr (stmt), callback_op, wi, pset);
return ret;
break;
+ case GIMPLE_TRANSACTION:
+ ret = walk_tree (gimple_transaction_label_ptr (stmt), callback_op,
+ wi, pset);
+ if (ret)
+ return ret;
+ break;
+
/* Tuples that do not have operands. */
case GIMPLE_NOP:
case GIMPLE_RESX:
gimple stmt = gsi_stmt (*gsi);
if (wi)
- wi->gsi = *gsi;
+ {
+ wi->gsi = *gsi;
+ wi->removed_stmt = false;
- if (wi && wi->want_locations && gimple_has_location (stmt))
- input_location = gimple_location (stmt);
+ if (wi->want_locations && gimple_has_location (stmt))
+ input_location = gimple_location (stmt);
+ }
ret = NULL;
a value to return. */
gcc_assert (tree_ret == NULL);
+ if (wi && wi->removed_stmt)
+ return NULL;
+
/* Re-read stmt in case the callback changed it. */
stmt = gsi_stmt (*gsi);
}
switch (gimple_code (stmt))
{
case GIMPLE_BIND:
- ret = walk_gimple_seq (gimple_bind_body (stmt), callback_stmt,
- callback_op, wi);
+ ret = walk_gimple_seq_mod (gimple_bind_body_ptr (stmt), callback_stmt,
+ callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_CATCH:
- ret = walk_gimple_seq (gimple_catch_handler (stmt), callback_stmt,
- callback_op, wi);
+ ret = walk_gimple_seq_mod (gimple_catch_handler_ptr (stmt), callback_stmt,
+ callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_EH_FILTER:
- ret = walk_gimple_seq (gimple_eh_filter_failure (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_eh_filter_failure_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
+ case GIMPLE_EH_ELSE:
+ ret = walk_gimple_seq_mod (gimple_eh_else_n_body_ptr (stmt),
+ callback_stmt, callback_op, wi);
+ if (ret)
+ return wi->callback_result;
+ ret = walk_gimple_seq_mod (gimple_eh_else_e_body_ptr (stmt),
+ callback_stmt, callback_op, wi);
+ if (ret)
+ return wi->callback_result;
+ break;
+
case GIMPLE_TRY:
- ret = walk_gimple_seq (gimple_try_eval (stmt), callback_stmt, callback_op,
+ ret = walk_gimple_seq_mod (gimple_try_eval_ptr (stmt), callback_stmt, callback_op,
wi);
if (ret)
return wi->callback_result;
- ret = walk_gimple_seq (gimple_try_cleanup (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_try_cleanup_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_OMP_FOR:
- ret = walk_gimple_seq (gimple_omp_for_pre_body (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
- ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt, callback_op,
- wi);
+ ret = walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), callback_stmt,
+ callback_op, wi);
if (ret)
return wi->callback_result;
break;
case GIMPLE_WITH_CLEANUP_EXPR:
- ret = walk_gimple_seq (gimple_wce_cleanup (stmt), callback_stmt,
+ ret = walk_gimple_seq_mod (gimple_wce_cleanup_ptr (stmt), callback_stmt,
callback_op, wi);
if (ret)
return wi->callback_result;
break;
+ case GIMPLE_TRANSACTION:
+ ret = walk_gimple_seq_mod (gimple_transaction_body_ptr (stmt),
+ callback_stmt, callback_op, wi);
+ if (ret)
+ return wi->callback_result;
+ break;
+
default:
gcc_assert (!gimple_has_substatements (stmt));
break;
return (gimple_body (fndecl) || (fn && fn->cfg));
}
+/* Return true if calls C1 and C2 are known to go to the same function. */
+
+bool
+gimple_call_same_target_p (const_gimple c1, const_gimple c2)
+{
+ if (gimple_call_internal_p (c1))
+ return (gimple_call_internal_p (c2)
+ && gimple_call_internal_fn (c1) == gimple_call_internal_fn (c2));
+ else
+ return (gimple_call_fn (c1) == gimple_call_fn (c2)
+ || (gimple_call_fndecl (c1)
+ && gimple_call_fndecl (c1) == gimple_call_fndecl (c2)));
+}
+
/* Detect flags from a GIMPLE_CALL. This is just like
call_expr_flags, but for gimple tuples. */
{
int flags;
tree decl = gimple_call_fndecl (stmt);
- tree t;
if (decl)
flags = flags_from_decl_or_type (decl);
+ else if (gimple_call_internal_p (stmt))
+ flags = internal_fn_flags (gimple_call_internal_fn (stmt));
else
- {
- t = TREE_TYPE (gimple_call_fn (stmt));
- if (t && TREE_CODE (t) == POINTER_TYPE)
- flags = flags_from_decl_or_type (TREE_TYPE (t));
- else
- flags = 0;
- }
+ flags = flags_from_decl_or_type (gimple_call_fntype (stmt));
if (stmt->gsbase.subcode & GF_CALL_NOTHROW)
flags |= ECF_NOTHROW;
return flags;
}
+/* Return the "fn spec" string for call STMT. */
+
+static tree
+gimple_call_fnspec (const_gimple stmt)
+{
+ tree type, attr;
+
+ type = gimple_call_fntype (stmt);
+ if (!type)
+ return NULL_TREE;
+
+ attr = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (type));
+ if (!attr)
+ return NULL_TREE;
+
+ return TREE_VALUE (TREE_VALUE (attr));
+}
+
/* Detects argument flags for argument number ARG on call STMT. */
int
gimple_call_arg_flags (const_gimple stmt, unsigned arg)
{
- tree type = TREE_TYPE (TREE_TYPE (gimple_call_fn (stmt)));
- tree attr = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (type));
- if (!attr)
- return 0;
+ tree attr = gimple_call_fnspec (stmt);
- attr = TREE_VALUE (TREE_VALUE (attr));
- if (1 + arg >= (unsigned) TREE_STRING_LENGTH (attr))
+ if (!attr || 1 + arg >= (unsigned) TREE_STRING_LENGTH (attr))
return 0;
switch (TREE_STRING_POINTER (attr)[1 + arg])
int
gimple_call_return_flags (const_gimple stmt)
{
- tree type;
- tree attr = NULL_TREE;
+ tree attr;
if (gimple_call_flags (stmt) & ECF_MALLOC)
return ERF_NOALIAS;
- type = TREE_TYPE (TREE_TYPE (gimple_call_fn (stmt)));
- attr = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (type));
- if (!attr)
- return 0;
-
- attr = TREE_VALUE (TREE_VALUE (attr));
- if (TREE_STRING_LENGTH (attr) < 1)
+ attr = gimple_call_fnspec (stmt);
+ if (!attr || TREE_STRING_LENGTH (attr) < 1)
return 0;
switch (TREE_STRING_POINTER (attr)[0])
tree lhs = gimple_assign_lhs (stmt);
gimple new_stmt = gimple_alloc (gimple_code (stmt), new_rhs_ops + 1);
memcpy (new_stmt, stmt, gimple_size (gimple_code (stmt)));
+ gimple_init_singleton (new_stmt);
gsi_replace (gsi, new_stmt, true);
stmt = new_stmt;
/* Return a deep copy of statement STMT. All the operands from STMT
are reallocated and copied using unshare_expr. The DEF, USE, VDEF
- and VUSE operand arrays are set to empty in the new copy. */
+ and VUSE operand arrays are set to empty in the new copy. The new
+ copy isn't part of any sequence. */
gimple
gimple_copy (gimple stmt)
/* Shallow copy all the fields from STMT. */
memcpy (copy, stmt, gimple_size (code));
+ gimple_init_singleton (copy);
/* If STMT has sub-statements, deep-copy them as well. */
if (gimple_has_substatements (stmt))
gimple_eh_filter_set_types (copy, t);
break;
+ case GIMPLE_EH_ELSE:
+ new_seq = gimple_seq_copy (gimple_eh_else_n_body (stmt));
+ gimple_eh_else_set_n_body (copy, new_seq);
+ new_seq = gimple_seq_copy (gimple_eh_else_e_body (stmt));
+ gimple_eh_else_set_e_body (copy, new_seq);
+ break;
+
case GIMPLE_TRY:
new_seq = gimple_seq_copy (gimple_try_eval (stmt));
gimple_try_set_eval (copy, new_seq);
gimple_omp_set_body (copy, new_seq);
break;
+ case GIMPLE_TRANSACTION:
+ new_seq = gimple_seq_copy (gimple_transaction_body (stmt));
+ gimple_transaction_set_body (copy, new_seq);
+ break;
+
case GIMPLE_WITH_CLEANUP_EXPR:
new_seq = gimple_seq_copy (gimple_wce_cleanup (stmt));
gimple_wce_set_cleanup (copy, new_seq);
}
-/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
- a MODIFIED field. */
-
-void
-gimple_set_modified (gimple s, bool modifiedp)
-{
- if (gimple_has_ops (s))
- {
- s->gsbase.modified = (unsigned) modifiedp;
-
- if (modifiedp
- && cfun->gimple_df
- && is_gimple_call (s)
- && gimple_call_noreturn_p (s))
- VEC_safe_push (gimple, gc, MODIFIED_NORETURN_CALLS (cfun), s);
- }
-}
-
-
/* Return true if statement S has side-effects. We consider a
statement to have side effects if:
bool
gimple_has_side_effects (const_gimple s)
{
- unsigned i;
-
if (is_gimple_debug (s))
return false;
if (gimple_has_volatile_ops (s))
return true;
- if (is_gimple_call (s))
- {
- unsigned nargs = gimple_call_num_args (s);
-
- if (!(gimple_call_flags (s) & (ECF_CONST | ECF_PURE)))
- return true;
- else if (gimple_call_flags (s) & ECF_LOOPING_CONST_OR_PURE)
- /* An infinite loop is considered a side effect. */
- return true;
-
- if (gimple_call_lhs (s)
- && TREE_SIDE_EFFECTS (gimple_call_lhs (s)))
- {
- gcc_assert (gimple_has_volatile_ops (s));
- return true;
- }
-
- if (TREE_SIDE_EFFECTS (gimple_call_fn (s)))
- return true;
-
- for (i = 0; i < nargs; i++)
- if (TREE_SIDE_EFFECTS (gimple_call_arg (s, i)))
- {
- gcc_assert (gimple_has_volatile_ops (s));
- return true;
- }
-
- return false;
- }
- else
- {
- for (i = 0; i < gimple_num_ops (s); i++)
- if (TREE_SIDE_EFFECTS (gimple_op (s, i)))
- {
- gcc_assert (gimple_has_volatile_ops (s));
- return true;
- }
- }
-
- return false;
-}
-
-/* Return true if the RHS of statement S has side effects.
- We may use it to determine if it is admissable to replace
- an assignment or call with a copy of a previously-computed
- value. In such cases, side-effects due the the LHS are
- preserved. */
-
-bool
-gimple_rhs_has_side_effects (const_gimple s)
-{
- unsigned i;
+ if (gimple_code (s) == GIMPLE_ASM
+ && gimple_asm_volatile_p (s))
+ return true;
if (is_gimple_call (s))
{
- unsigned nargs = gimple_call_num_args (s);
-
- if (!(gimple_call_flags (s) & (ECF_CONST | ECF_PURE)))
- return true;
-
- /* We cannot use gimple_has_volatile_ops here,
- because we must ignore a volatile LHS. */
- if (TREE_SIDE_EFFECTS (gimple_call_fn (s))
- || TREE_THIS_VOLATILE (gimple_call_fn (s)))
- {
- gcc_assert (gimple_has_volatile_ops (s));
- return true;
- }
+ int flags = gimple_call_flags (s);
- for (i = 0; i < nargs; i++)
- if (TREE_SIDE_EFFECTS (gimple_call_arg (s, i))
- || TREE_THIS_VOLATILE (gimple_call_arg (s, i)))
- return true;
+ /* An infinite loop is considered a side effect. */
+ if (!(flags & (ECF_CONST | ECF_PURE))
+ || (flags & ECF_LOOPING_CONST_OR_PURE))
+ return true;
return false;
}
- else if (is_gimple_assign (s))
- {
- /* Skip the first operand, the LHS. */
- for (i = 1; i < gimple_num_ops (s); i++)
- if (TREE_SIDE_EFFECTS (gimple_op (s, i))
- || TREE_THIS_VOLATILE (gimple_op (s, i)))
- {
- gcc_assert (gimple_has_volatile_ops (s));
- return true;
- }
- }
- else if (is_gimple_debug (s))
- return false;
- else
- {
- /* For statements without an LHS, examine all arguments. */
- for (i = 0; i < gimple_num_ops (s); i++)
- if (TREE_SIDE_EFFECTS (gimple_op (s, i))
- || TREE_THIS_VOLATILE (gimple_op (s, i)))
- {
- gcc_assert (gimple_has_volatile_ops (s));
- return true;
- }
- }
return false;
}
|| (SYM) == TRUTH_OR_EXPR \
|| (SYM) == TRUTH_XOR_EXPR) ? GIMPLE_BINARY_RHS \
: (SYM) == TRUTH_NOT_EXPR ? GIMPLE_UNARY_RHS \
- : ((SYM) == WIDEN_MULT_PLUS_EXPR \
+ : ((SYM) == COND_EXPR \
+ || (SYM) == WIDEN_MULT_PLUS_EXPR \
|| (SYM) == WIDEN_MULT_MINUS_EXPR \
+ || (SYM) == DOT_PROD_EXPR \
+ || (SYM) == REALIGN_LOAD_EXPR \
+ || (SYM) == VEC_COND_EXPR \
+ || (SYM) == VEC_PERM_EXPR \
|| (SYM) == FMA_EXPR) ? GIMPLE_TERNARY_RHS \
- : ((SYM) == COND_EXPR \
- || (SYM) == CONSTRUCTOR \
+ : ((SYM) == CONSTRUCTOR \
|| (SYM) == OBJ_TYPE_REF \
|| (SYM) == ASSERT_EXPR \
|| (SYM) == ADDR_EXPR \
|| (SYM) == WITH_SIZE_EXPR \
- || (SYM) == SSA_NAME \
- || (SYM) == POLYNOMIAL_CHREC \
- || (SYM) == DOT_PROD_EXPR \
- || (SYM) == VEC_COND_EXPR \
- || (SYM) == REALIGN_LOAD_EXPR) ? GIMPLE_SINGLE_RHS \
+ || (SYM) == SSA_NAME) ? GIMPLE_SINGLE_RHS \
: GIMPLE_INVALID_RHS),
#define END_OF_BASE_TREE_CODES (unsigned char) GIMPLE_INVALID_RHS,
/* Validation of GIMPLE expressions. */
-/* Returns true iff T is a valid RHS for an assignment to a renamed
- user -- or front-end generated artificial -- variable. */
+/* Return true if T is a valid LHS for a GIMPLE assignment expression. */
bool
-is_gimple_reg_rhs (tree t)
-{
- return get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS;
-}
-
-/* Returns true iff T is a valid RHS for an assignment to an un-renamed
- LHS, or for a call argument. */
-
-bool
-is_gimple_mem_rhs (tree t)
-{
- /* If we're dealing with a renamable type, either source or dest must be
- a renamed variable. */
- if (is_gimple_reg_type (TREE_TYPE (t)))
- return is_gimple_val (t);
- else
- return is_gimple_val (t) || is_gimple_lvalue (t);
-}
-
-/* Return true if T is a valid LHS for a GIMPLE assignment expression. */
-
-bool
-is_gimple_lvalue (tree t)
+is_gimple_lvalue (tree t)
{
return (is_gimple_addressable (t)
|| TREE_CODE (t) == WITH_SIZE_EXPR
is_gimple_condexpr (tree t)
{
return (is_gimple_val (t) || (COMPARISON_CLASS_P (t)
- && !tree_could_trap_p (t)
+ && !tree_could_throw_p (t)
&& is_gimple_val (TREE_OPERAND (t, 0))
&& is_gimple_val (TREE_OPERAND (t, 1))));
}
}
}
-/* Strip out all handled components that produce invariant
- offsets. */
-
-static const_tree
-strip_invariant_refs (const_tree op)
-{
- while (handled_component_p (op))
- {
- switch (TREE_CODE (op))
- {
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- if (!is_gimple_constant (TREE_OPERAND (op, 1))
- || TREE_OPERAND (op, 2) != NULL_TREE
- || TREE_OPERAND (op, 3) != NULL_TREE)
- return NULL;
- break;
-
- case COMPONENT_REF:
- if (TREE_OPERAND (op, 2) != NULL_TREE)
- return NULL;
- break;
-
- default:;
- }
- op = TREE_OPERAND (op, 0);
- }
-
- return op;
-}
-
/* Return true if T is a gimple invariant address. */
bool
return false;
op = strip_invariant_refs (TREE_OPERAND (t, 0));
+ if (!op)
+ return false;
+
+ if (TREE_CODE (op) == MEM_REF)
+ {
+ const_tree op0 = TREE_OPERAND (op, 0);
+ return (TREE_CODE (op0) == ADDR_EXPR
+ && (CONSTANT_CLASS_P (TREE_OPERAND (op0, 0))
+ || decl_address_ip_invariant_p (TREE_OPERAND (op0, 0))));
+ }
- return op && (CONSTANT_CLASS_P (op) || decl_address_ip_invariant_p (op));
+ return CONSTANT_CLASS_P (op) || decl_address_ip_invariant_p (op);
}
/* Return true if T is a GIMPLE minimal invariant. It's a restricted
return is_gimple_constant (t);
}
-/* Return true if T looks like a valid GIMPLE statement. */
-
-bool
-is_gimple_stmt (tree t)
-{
- const enum tree_code code = TREE_CODE (t);
-
- switch (code)
- {
- case NOP_EXPR:
- /* The only valid NOP_EXPR is the empty statement. */
- return IS_EMPTY_STMT (t);
-
- case BIND_EXPR:
- case COND_EXPR:
- /* These are only valid if they're void. */
- return TREE_TYPE (t) == NULL || VOID_TYPE_P (TREE_TYPE (t));
-
- case SWITCH_EXPR:
- case GOTO_EXPR:
- case RETURN_EXPR:
- case LABEL_EXPR:
- case CASE_LABEL_EXPR:
- case TRY_CATCH_EXPR:
- case TRY_FINALLY_EXPR:
- case EH_FILTER_EXPR:
- case CATCH_EXPR:
- case ASM_EXPR:
- case STATEMENT_LIST:
- case OMP_PARALLEL:
- case OMP_FOR:
- case OMP_SECTIONS:
- case OMP_SECTION:
- case OMP_SINGLE:
- case OMP_MASTER:
- case OMP_ORDERED:
- case OMP_CRITICAL:
- case OMP_TASK:
- /* These are always void. */
- return true;
-
- case CALL_EXPR:
- case MODIFY_EXPR:
- case PREDICT_EXPR:
- /* These are valid regardless of their type. */
- return true;
-
- default:
- return false;
- }
-}
-
/* Return true if T is a variable. */
bool
|| TREE_CODE (t) == STRING_CST);
}
-/* Return true if TYPE is a suitable type for a scalar register variable. */
-
-bool
-is_gimple_reg_type (tree type)
-{
- return !AGGREGATE_TYPE_P (type);
-}
-
/* Return true if T is a non-aggregate register variable. */
bool
is_gimple_reg (tree t)
{
if (TREE_CODE (t) == SSA_NAME)
- t = SSA_NAME_VAR (t);
+ {
+ t = SSA_NAME_VAR (t);
+ if (TREE_CODE (t) == VAR_DECL
+ && VAR_DECL_IS_VIRTUAL_OPERAND (t))
+ return false;
+ return true;
+ }
+
+ if (TREE_CODE (t) == VAR_DECL
+ && VAR_DECL_IS_VIRTUAL_OPERAND (t))
+ return false;
if (!is_gimple_variable (t))
return false;
}
-/* Return true if T is a GIMPLE variable whose address is not needed. */
-
-bool
-is_gimple_non_addressable (tree t)
-{
- if (TREE_CODE (t) == SSA_NAME)
- t = SSA_NAME_VAR (t);
-
- return (is_gimple_variable (t) && ! needs_to_live_in_memory (t));
-}
-
/* Return true if T is a GIMPLE rvalue, i.e. an identifier or a constant. */
bool
|| decl_address_invariant_p (TREE_OPERAND (t, 0)))));
}
-/* If T makes a function call, return the corresponding CALL_EXPR operand.
- Otherwise, return NULL_TREE. */
-
-tree
-get_call_expr_in (tree t)
-{
- if (TREE_CODE (t) == MODIFY_EXPR)
- t = TREE_OPERAND (t, 1);
- if (TREE_CODE (t) == WITH_SIZE_EXPR)
- t = TREE_OPERAND (t, 0);
- if (TREE_CODE (t) == CALL_EXPR)
- return t;
- return NULL_TREE;
-}
-
/* Given a memory reference expression T, return its base address.
The base address of a memory reference expression is the main
{
/* Strip conversions around boolean operations. */
if (CONVERT_EXPR_P (t)
- && truth_value_p (TREE_CODE (TREE_OPERAND (t, 0))))
+ && (truth_value_p (TREE_CODE (TREE_OPERAND (t, 0)))
+ || TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
+ == BOOLEAN_TYPE))
t = TREE_OPERAND (t, 0);
- /* For (bool)x use x != 0. */
- if (CONVERT_EXPR_P (t)
- && TREE_CODE (TREE_TYPE (t)) == BOOLEAN_TYPE)
- {
- tree top0 = TREE_OPERAND (t, 0);
- t = build2 (NE_EXPR, TREE_TYPE (t),
- top0, build_int_cst (TREE_TYPE (top0), 0));
- }
/* For !x use x == 0. */
- else if (TREE_CODE (t) == TRUTH_NOT_EXPR)
+ if (TREE_CODE (t) == TRUTH_NOT_EXPR)
{
tree top0 = TREE_OPERAND (t, 0);
t = build2 (EQ_EXPR, TREE_TYPE (t),
gimple_call_copy_skip_args (gimple stmt, bitmap args_to_skip)
{
int i;
- tree fn = gimple_call_fn (stmt);
int nargs = gimple_call_num_args (stmt);
VEC(tree, heap) *vargs = VEC_alloc (tree, heap, nargs);
gimple new_stmt;
if (!bitmap_bit_p (args_to_skip, i))
VEC_quick_push (tree, vargs, gimple_call_arg (stmt, i));
- new_stmt = gimple_build_call_vec (fn, vargs);
+ if (gimple_call_internal_p (stmt))
+ new_stmt = gimple_build_call_internal_vec (gimple_call_internal_fn (stmt),
+ vargs);
+ else
+ new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
VEC_free (tree, heap, vargs);
if (gimple_call_lhs (stmt))
gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
}
+enum gtc_mode { GTC_MERGE = 0, GTC_DIAG = 1 };
+
static hashval_t gimple_type_hash (const void *);
/* Structure used to maintain a cache of some type pairs compared by
signed char same_p[2];
};
typedef struct type_pair_d *type_pair_t;
-
DEF_VEC_P(type_pair_t);
DEF_VEC_ALLOC_P(type_pair_t,heap);
-/* Return a hash value for the type pair pointed-to by P. */
-
-static hashval_t
-type_pair_hash (const void *p)
-{
- const struct type_pair_d *pair = (const struct type_pair_d *) p;
- hashval_t val1 = pair->uid1;
- hashval_t val2 = pair->uid2;
- return (iterative_hash_hashval_t (val2, val1)
- ^ iterative_hash_hashval_t (val1, val2));
-}
-
-/* Compare two type pairs pointed-to by P1 and P2. */
+#define GIMPLE_TYPE_PAIR_SIZE 16381
+struct type_pair_d *type_pair_cache;
-static int
-type_pair_eq (const void *p1, const void *p2)
-{
- const struct type_pair_d *pair1 = (const struct type_pair_d *) p1;
- const struct type_pair_d *pair2 = (const struct type_pair_d *) p2;
- return ((pair1->uid1 == pair2->uid1 && pair1->uid2 == pair2->uid2)
- || (pair1->uid1 == pair2->uid2 && pair1->uid2 == pair2->uid1));
-}
/* Lookup the pair of types T1 and T2 in *VISITED_P. Insert a new
entry if none existed. */
-static type_pair_t
-lookup_type_pair (tree t1, tree t2, htab_t *visited_p, struct obstack *ob_p)
+static inline type_pair_t
+lookup_type_pair (tree t1, tree t2)
{
- struct type_pair_d pair;
- type_pair_t p;
- void **slot;
+ unsigned int index;
+ unsigned int uid1, uid2;
+
+ if (type_pair_cache == NULL)
+ type_pair_cache = XCNEWVEC (struct type_pair_d, GIMPLE_TYPE_PAIR_SIZE);
- if (*visited_p == NULL)
+ if (TYPE_UID (t1) < TYPE_UID (t2))
{
- *visited_p = htab_create (251, type_pair_hash, type_pair_eq, NULL);
- gcc_obstack_init (ob_p);
+ uid1 = TYPE_UID (t1);
+ uid2 = TYPE_UID (t2);
}
-
- pair.uid1 = TYPE_UID (t1);
- pair.uid2 = TYPE_UID (t2);
- slot = htab_find_slot (*visited_p, &pair, INSERT);
-
- if (*slot)
- p = *((type_pair_t *) slot);
else
{
- p = XOBNEW (ob_p, struct type_pair_d);
- p->uid1 = TYPE_UID (t1);
- p->uid2 = TYPE_UID (t2);
- p->same_p[0] = -2;
- p->same_p[1] = -2;
- *slot = (void *) p;
+ uid1 = TYPE_UID (t2);
+ uid2 = TYPE_UID (t1);
}
+ gcc_checking_assert (uid1 != uid2);
- return p;
+  /* Avoid iterative_hash_hashval_t here as it implies function calls.
+     We know that UIDs are in a limited range.  */
+ index = ((((unsigned HOST_WIDE_INT)uid1 << HOST_BITS_PER_WIDE_INT / 2) + uid2)
+ % GIMPLE_TYPE_PAIR_SIZE);
+ if (type_pair_cache [index].uid1 == uid1
+ && type_pair_cache [index].uid2 == uid2)
+ return &type_pair_cache[index];
+
+ type_pair_cache [index].uid1 = uid1;
+ type_pair_cache [index].uid2 = uid2;
+ type_pair_cache [index].same_p[0] = -2;
+ type_pair_cache [index].same_p[1] = -2;
+
+ return &type_pair_cache[index];
}
/* Per pointer state for the SCC finding. The on_sccstack flag
} gimple_type_leader_entry;
#define GIMPLE_TYPE_LEADER_SIZE 16381
-static GTY((length("GIMPLE_TYPE_LEADER_SIZE"))) gimple_type_leader_entry
- *gimple_type_leader;
+static GTY((deletable, length("GIMPLE_TYPE_LEADER_SIZE")))
+ gimple_type_leader_entry *gimple_type_leader;
/* Lookup an existing leader for T and return it or NULL_TREE, if
there is none in the cache. */
-static tree
+static inline tree
gimple_lookup_type_leader (tree t)
{
gimple_type_leader_entry *leader;
true if both types have no names. */
static bool
-compare_type_names_p (tree t1, tree t2, bool for_completion_p)
+compare_type_names_p (tree t1, tree t2)
{
tree name1 = TYPE_NAME (t1);
tree name2 = TYPE_NAME (t2);
- /* Consider anonymous types all unique for completion. */
- if (for_completion_p
- && (!name1 || !name2))
+ if ((name1 != NULL_TREE) != (name2 != NULL_TREE))
return false;
- if (name1 && TREE_CODE (name1) == TYPE_DECL)
- {
- name1 = DECL_NAME (name1);
- if (for_completion_p
- && !name1)
- return false;
- }
- gcc_assert (!name1 || TREE_CODE (name1) == IDENTIFIER_NODE);
+ if (name1 == NULL_TREE)
+ return true;
- if (name2 && TREE_CODE (name2) == TYPE_DECL)
- {
- name2 = DECL_NAME (name2);
- if (for_completion_p
- && !name2)
- return false;
- }
- gcc_assert (!name2 || TREE_CODE (name2) == IDENTIFIER_NODE);
+ /* Either both should be a TYPE_DECL or both an IDENTIFIER_NODE. */
+ if (TREE_CODE (name1) != TREE_CODE (name2))
+ return false;
+
+ if (TREE_CODE (name1) == TYPE_DECL)
+ name1 = DECL_NAME (name1);
+ gcc_checking_assert (!name1 || TREE_CODE (name1) == IDENTIFIER_NODE);
+
+ if (TREE_CODE (name2) == TYPE_DECL)
+ name2 = DECL_NAME (name2);
+ gcc_checking_assert (!name2 || TREE_CODE (name2) == IDENTIFIER_NODE);
/* Identifiers can be compared with pointer equality rather
than a string comparison. */
/* Return true if the field decls F1 and F2 are at the same offset.
- This is intended to be used on GIMPLE types only. In order to
- compare GENERIC types, use fields_compatible_p instead. */
+ This is intended to be used on GIMPLE types only. */
bool
gimple_compare_field_offset (tree f1, tree f2)
return false;
}
-/* If the type T1 and the type T2 are a complete and an incomplete
- variant of the same type return true. */
-
-static bool
-gimple_compatible_complete_and_incomplete_subtype_p (tree t1, tree t2)
-{
- /* If one pointer points to an incomplete type variant of
- the other pointed-to type they are the same. */
- if (TREE_CODE (t1) == TREE_CODE (t2)
- && RECORD_OR_UNION_TYPE_P (t1)
- && (!COMPLETE_TYPE_P (t1)
- || !COMPLETE_TYPE_P (t2))
- && TYPE_QUALS (t1) == TYPE_QUALS (t2)
- && compare_type_names_p (TYPE_MAIN_VARIANT (t1),
- TYPE_MAIN_VARIANT (t2), true))
- return true;
- return false;
-}
-
static bool
-gimple_types_compatible_p_1 (tree, tree, enum gtc_mode, type_pair_t,
+gimple_types_compatible_p_1 (tree, tree, type_pair_t,
VEC(type_pair_t, heap) **,
struct pointer_map_t *, struct obstack *);
SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
static bool
-gtc_visit (tree t1, tree t2, enum gtc_mode mode,
+gtc_visit (tree t1, tree t2,
struct sccs *state,
VEC(type_pair_t, heap) **sccstack,
struct pointer_map_t *sccstate,
struct sccs *cstate = NULL;
type_pair_t p;
void **slot;
+ tree leader1, leader2;
/* Check first for the obvious case of pointer identity. */
if (t1 == t2)
if (t1 == NULL_TREE || t2 == NULL_TREE)
return false;
- /* If the types have been previously registered and found equal
- they still are. */
- if (mode == GTC_MERGE)
- {
- tree leader1 = gimple_lookup_type_leader (t1);
- tree leader2 = gimple_lookup_type_leader (t2);
- if (leader1 == t2
- || t1 == leader2
- || (leader1 && leader1 == leader2))
- return true;
- }
- else if (mode == GTC_DIAG)
- {
- if (TYPE_CANONICAL (t1)
- && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2))
- return true;
- }
-
/* Can't be the same type if the types don't have the same code. */
if (TREE_CODE (t1) != TREE_CODE (t2))
return false;
if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
return false;
- /* Void types are always the same. */
- if (TREE_CODE (t1) == VOID_TYPE)
+ if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
+ return false;
+
+ /* Void types and nullptr types are always the same. */
+ if (TREE_CODE (t1) == VOID_TYPE
+ || TREE_CODE (t1) == NULLPTR_TYPE)
return true;
+ /* Can't be the same type if they have different alignment or mode. */
+ if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
+ || TYPE_MODE (t1) != TYPE_MODE (t2))
+ return false;
+
/* Do some simple checks before doing three hashtable queries. */
if (INTEGRAL_TYPE_P (t1)
|| SCALAR_FLOAT_TYPE_P (t1)
|| FIXED_POINT_TYPE_P (t1)
|| TREE_CODE (t1) == VECTOR_TYPE
|| TREE_CODE (t1) == COMPLEX_TYPE
- || TREE_CODE (t1) == OFFSET_TYPE)
+ || TREE_CODE (t1) == OFFSET_TYPE
+ || POINTER_TYPE_P (t1))
{
- /* Can't be the same type if they have different alignment,
- sign, precision or mode. */
- if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
- || TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
- || TYPE_MODE (t1) != TYPE_MODE (t2)
+ /* Can't be the same type if they have different sign or precision. */
+ if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
|| TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
return false;
if (TREE_CODE (t1) == INTEGER_TYPE
- && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2)
- || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)))
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
return false;
/* That's all we need to check for float and fixed-point types. */
|| FIXED_POINT_TYPE_P (t1))
return true;
- /* For integral types fall thru to more complex checks. */
+ /* For other types fall through to more complex checks. */
}
- else if (AGGREGATE_TYPE_P (t1) || POINTER_TYPE_P (t1))
- {
- /* Can't be the same type if they have different alignment or mode. */
- if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
- || TYPE_MODE (t1) != TYPE_MODE (t2))
- return false;
- }
+ /* If the types have been previously registered and found equal
+ they still are. */
+ leader1 = gimple_lookup_type_leader (t1);
+ leader2 = gimple_lookup_type_leader (t2);
+ if (leader1 == t2
+ || t1 == leader2
+ || (leader1 && leader1 == leader2))
+ return true;
/* If the hash values of t1 and t2 are different the types can't
possibly be the same. This helps keeping the type-pair hashtable
return false;
/* Allocate a new cache entry for this comparison. */
- p = lookup_type_pair (t1, t2, >c_visited, >c_ob);
- if (p->same_p[mode] == 0 || p->same_p[mode] == 1)
+ p = lookup_type_pair (t1, t2);
+ if (p->same_p[GTC_MERGE] == 0 || p->same_p[GTC_MERGE] == 1)
{
/* We have already decided whether T1 and T2 are the
same, return the cached result. */
- return p->same_p[mode] == 1;
+ return p->same_p[GTC_MERGE] == 1;
}
if ((slot = pointer_map_contains (sccstate, p)) != NULL)
/* Not yet visited. DFS recurse. */
if (!cstate)
{
- gimple_types_compatible_p_1 (t1, t2, mode, p,
+ gimple_types_compatible_p_1 (t1, t2, p,
sccstack, sccstate, sccstate_obstack);
cstate = (struct sccs *)* pointer_map_contains (sccstate, p);
state->low = MIN (state->low, cstate->low);
SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done. */
static bool
-gimple_types_compatible_p_1 (tree t1, tree t2, enum gtc_mode mode,
- type_pair_t p,
+gimple_types_compatible_p_1 (tree t1, tree t2, type_pair_t p,
VEC(type_pair_t, heap) **sccstack,
struct pointer_map_t *sccstate,
struct obstack *sccstate_obstack)
{
struct sccs *state;
- gcc_assert (p->same_p[mode] == -2);
+ gcc_assert (p->same_p[GTC_MERGE] == -2);
state = XOBNEW (sccstate_obstack, struct sccs);
*pointer_map_insert (sccstate, p) = state;
SCCs this assumption may get revisited. */
state->u.same_p = 1;
+ /* The struct tags shall compare equal. */
+ if (!compare_type_names_p (t1, t2))
+ goto different_types;
+
+ /* We may not merge typedef types to the same type in different
+ contexts. */
+ if (TYPE_NAME (t1)
+ && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
+ && DECL_CONTEXT (TYPE_NAME (t1))
+ && TYPE_P (DECL_CONTEXT (TYPE_NAME (t1))))
+ {
+ if (!gtc_visit (DECL_CONTEXT (TYPE_NAME (t1)),
+ DECL_CONTEXT (TYPE_NAME (t2)),
+ state, sccstack, sccstate, sccstate_obstack))
+ goto different_types;
+ }
+
/* If their attributes are not the same they can't be the same type. */
if (!attribute_list_equal (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2)))
goto different_types;
{
case VECTOR_TYPE:
case COMPLEX_TYPE:
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), mode,
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
state, sccstack, sccstate, sccstate_obstack))
goto different_types;
goto same_types;
case ARRAY_TYPE:
/* Array types are the same if the element types are the same and
the number of elements are the same. */
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), mode,
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
state, sccstack, sccstate, sccstate_obstack)
|| TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)
|| TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2))
goto same_types;
else if (i1 == NULL_TREE || i2 == NULL_TREE)
goto different_types;
- /* If for a complete array type the possibly gimplified sizes
- are different the types are different. */
- else if (((TYPE_SIZE (i1) != NULL) ^ (TYPE_SIZE (i2) != NULL))
- || (TYPE_SIZE (i1)
- && TYPE_SIZE (i2)
- && !operand_equal_p (TYPE_SIZE (i1), TYPE_SIZE (i2), 0)))
- goto different_types;
else
{
tree min1 = TYPE_MIN_VALUE (i1);
case METHOD_TYPE:
/* Method types should belong to the same class. */
if (!gtc_visit (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2),
- mode, state, sccstack, sccstate, sccstate_obstack))
+ state, sccstack, sccstate, sccstate_obstack))
goto different_types;
/* Fallthru */
case FUNCTION_TYPE:
/* Function types are the same if the return type and arguments types
are the same. */
- if ((mode != GTC_DIAG
- || !gimple_compatible_complete_and_incomplete_subtype_p
- (TREE_TYPE (t1), TREE_TYPE (t2)))
- && !gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), mode,
- state, sccstack, sccstate, sccstate_obstack))
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
+ state, sccstack, sccstate, sccstate_obstack))
goto different_types;
- if (!targetm.comp_type_attributes (t1, t2))
+ if (!comp_type_attributes (t1, t2))
goto different_types;
if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2))
parms1 && parms2;
parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2))
{
- if ((mode == GTC_MERGE
- || !gimple_compatible_complete_and_incomplete_subtype_p
- (TREE_VALUE (parms1), TREE_VALUE (parms2)))
- && !gtc_visit (TREE_VALUE (parms1), TREE_VALUE (parms2), mode,
- state, sccstack, sccstate, sccstate_obstack))
+ if (!gtc_visit (TREE_VALUE (parms1), TREE_VALUE (parms2),
+ state, sccstack, sccstate, sccstate_obstack))
goto different_types;
}
case OFFSET_TYPE:
{
- if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), mode,
+ if (!gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
state, sccstack, sccstate, sccstate_obstack)
|| !gtc_visit (TYPE_OFFSET_BASETYPE (t1),
- TYPE_OFFSET_BASETYPE (t2), mode,
+ TYPE_OFFSET_BASETYPE (t2),
state, sccstack, sccstate, sccstate_obstack))
goto different_types;
if (TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2))
goto different_types;
- /* If one pointer points to an incomplete type variant of
- the other pointed-to type they are the same. */
- if (mode == GTC_DIAG
- && gimple_compatible_complete_and_incomplete_subtype_p
- (TREE_TYPE (t1), TREE_TYPE (t2)))
- goto same_types;
-
/* Otherwise, pointer and reference types are the same if the
pointed-to types are the same. */
- if (gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2), mode,
+ if (gtc_visit (TREE_TYPE (t1), TREE_TYPE (t2),
state, sccstack, sccstate, sccstate_obstack))
goto same_types;
goto different_types;
}
- case NULLPTR_TYPE:
- /* There is only one decltype(nullptr). */
- goto same_types;
-
case INTEGER_TYPE:
case BOOLEAN_TYPE:
{
if (tree_int_cst_equal (c1, c2) != 1)
goto different_types;
+
+ if (TREE_PURPOSE (v1) != TREE_PURPOSE (v2))
+ goto different_types;
}
/* If one enumeration has more values than the other, they
{
tree f1, f2;
- /* The struct tags shall compare equal. */
- if (!compare_type_names_p (TYPE_MAIN_VARIANT (t1),
- TYPE_MAIN_VARIANT (t2), false))
- goto different_types;
-
/* For aggregate types, all the fields must be the same. */
for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2);
f1 && f2;
f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2))
{
- /* The fields must have the same name, offset and type. */
+ /* Different field kinds are not compatible. */
+ if (TREE_CODE (f1) != TREE_CODE (f2))
+ goto different_types;
+ /* Field decls must have the same name and offset. */
+ if (TREE_CODE (f1) == FIELD_DECL
+ && (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2)
+ || !gimple_compare_field_offset (f1, f2)))
+ goto different_types;
+ /* All entities should have the same name and type. */
if (DECL_NAME (f1) != DECL_NAME (f2)
- || DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2)
- || !gimple_compare_field_offset (f1, f2)
- || !gtc_visit (TREE_TYPE (f1), TREE_TYPE (f2), mode,
+ || !gtc_visit (TREE_TYPE (f1), TREE_TYPE (f2),
state, sccstack, sccstate, sccstate_obstack))
goto different_types;
}
x = VEC_pop (type_pair_t, *sccstack);
cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
cstate->on_sccstack = false;
- x->same_p[mode] = state->u.same_p;
+ x->same_p[GTC_MERGE] = state->u.same_p;
}
while (x != p);
}
   FOR_MERGING_P is true an incomplete type and a complete type
are considered different, otherwise they are considered compatible. */
-bool
-gimple_types_compatible_p (tree t1, tree t2, enum gtc_mode mode)
+static bool
+gimple_types_compatible_p (tree t1, tree t2)
{
VEC(type_pair_t, heap) *sccstack = NULL;
struct pointer_map_t *sccstate;
struct obstack sccstate_obstack;
type_pair_t p = NULL;
bool res;
+ tree leader1, leader2;
/* Before starting to set up the SCC machinery handle simple cases. */
if (t1 == NULL_TREE || t2 == NULL_TREE)
return false;
- /* If the types have been previously registered and found equal
- they still are. */
- if (mode == GTC_MERGE)
- {
- tree leader1 = gimple_lookup_type_leader (t1);
- tree leader2 = gimple_lookup_type_leader (t2);
- if (leader1 == t2
- || t1 == leader2
- || (leader1 && leader1 == leader2))
- return true;
- }
- else if (mode == GTC_DIAG)
- {
- if (TYPE_CANONICAL (t1)
- && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2))
- return true;
- }
-
/* Can't be the same type if the types don't have the same code. */
if (TREE_CODE (t1) != TREE_CODE (t2))
return false;
if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
return false;
- /* Void types are always the same. */
- if (TREE_CODE (t1) == VOID_TYPE)
+ if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
+ return false;
+
+ /* Void types and nullptr types are always the same. */
+ if (TREE_CODE (t1) == VOID_TYPE
+ || TREE_CODE (t1) == NULLPTR_TYPE)
return true;
+ /* Can't be the same type if they have different alignment or mode. */
+ if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
+ || TYPE_MODE (t1) != TYPE_MODE (t2))
+ return false;
+
/* Do some simple checks before doing three hashtable queries. */
if (INTEGRAL_TYPE_P (t1)
|| SCALAR_FLOAT_TYPE_P (t1)
|| FIXED_POINT_TYPE_P (t1)
|| TREE_CODE (t1) == VECTOR_TYPE
|| TREE_CODE (t1) == COMPLEX_TYPE
- || TREE_CODE (t1) == OFFSET_TYPE)
+ || TREE_CODE (t1) == OFFSET_TYPE
+ || POINTER_TYPE_P (t1))
{
- /* Can't be the same type if they have different alignment,
- sign, precision or mode. */
- if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
- || TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
- || TYPE_MODE (t1) != TYPE_MODE (t2)
+ /* Can't be the same type if they have different sign or precision. */
+ if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
|| TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
return false;
if (TREE_CODE (t1) == INTEGER_TYPE
- && (TYPE_IS_SIZETYPE (t1) != TYPE_IS_SIZETYPE (t2)
- || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)))
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
return false;
/* That's all we need to check for float and fixed-point types. */
|| FIXED_POINT_TYPE_P (t1))
return true;
- /* For integral types fall thru to more complex checks. */
+ /* For other types fall through to more complex checks. */
}
- else if (AGGREGATE_TYPE_P (t1) || POINTER_TYPE_P (t1))
- {
- /* Can't be the same type if they have different alignment or mode. */
- if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
- || TYPE_MODE (t1) != TYPE_MODE (t2))
- return false;
- }
+ /* If the types have been previously registered and found equal
+ they still are. */
+ leader1 = gimple_lookup_type_leader (t1);
+ leader2 = gimple_lookup_type_leader (t2);
+ if (leader1 == t2
+ || t1 == leader2
+ || (leader1 && leader1 == leader2))
+ return true;
/* If the hash values of t1 and t2 are different the types can't
possibly be the same. This helps keeping the type-pair hashtable
/* If we've visited this type pair before (in the case of aggregates
with self-referential types), and we made a decision, return it. */
- p = lookup_type_pair (t1, t2, >c_visited, >c_ob);
- if (p->same_p[mode] == 0 || p->same_p[mode] == 1)
+ p = lookup_type_pair (t1, t2);
+ if (p->same_p[GTC_MERGE] == 0 || p->same_p[GTC_MERGE] == 1)
{
/* We have already decided whether T1 and T2 are the
same, return the cached result. */
- return p->same_p[mode] == 1;
+ return p->same_p[GTC_MERGE] == 1;
}
/* Now set up the SCC machinery for the comparison. */
gtc_next_dfs_num = 1;
sccstate = pointer_map_create ();
gcc_obstack_init (&sccstate_obstack);
- res = gimple_types_compatible_p_1 (t1, t2, mode, p,
+ res = gimple_types_compatible_p_1 (t1, t2, p,
&sccstack, sccstate, &sccstate_obstack);
VEC_free (type_pair_t, heap, sccstack);
pointer_map_destroy (sccstate);
{
if (!name)
return v;
+ v = iterative_hash_hashval_t (TREE_CODE (name), v);
if (TREE_CODE (name) == TYPE_DECL)
name = DECL_NAME (name);
if (!name)
return iterative_hash_object (IDENTIFIER_HASH_VALUE (name), v);
}
+/* A type, hashvalue pair for sorting SCC members. */
+
+struct type_hash_pair {
+ tree type;
+ hashval_t hash;
+};
+
+/* Compare two type, hashvalue pairs. */
+
+static int
+type_hash_pair_compare (const void *p1_, const void *p2_)
+{
+ const struct type_hash_pair *p1 = (const struct type_hash_pair *) p1_;
+ const struct type_hash_pair *p2 = (const struct type_hash_pair *) p2_;
+ if (p1->hash < p2->hash)
+ return -1;
+ else if (p1->hash > p2->hash)
+ return 1;
+ return 0;
+}
+
/* Returning a hash value for gimple type TYPE combined with VAL.
SCCSTACK, SCCSTATE and SCCSTATE_OBSTACK are state for the DFS walk done.
smaller sets; when searching for existing matching types to merge,
only existing types having the same features as the new type will be
checked. */
- v = iterative_hash_hashval_t (TREE_CODE (type), 0);
+ v = iterative_hash_name (TYPE_NAME (type), 0);
+ if (TYPE_NAME (type)
+ && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+ && DECL_CONTEXT (TYPE_NAME (type))
+ && TYPE_P (DECL_CONTEXT (TYPE_NAME (type))))
+ v = visit (DECL_CONTEXT (TYPE_NAME (type)), state, v,
+ sccstack, sccstate, sccstate_obstack);
+ v = iterative_hash_hashval_t (TREE_CODE (type), v);
v = iterative_hash_hashval_t (TYPE_QUALS (type), v);
v = iterative_hash_hashval_t (TREE_ADDRESSABLE (type), v);
}
/* For pointer and reference types, fold in information about the type
- pointed to but do not recurse into possibly incomplete types to
- avoid hash differences for complete vs. incomplete types. */
+ pointed to. */
if (POINTER_TYPE_P (type))
- {
- if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (type)))
- {
- v = iterative_hash_hashval_t (TREE_CODE (TREE_TYPE (type)), v);
- v = iterative_hash_name
- (TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (type))), v);
- }
- else
- v = visit (TREE_TYPE (type), state, v,
- sccstack, sccstate, sccstate_obstack);
- }
+ v = visit (TREE_TYPE (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
/* For integer types hash the types min/max values and the string flag. */
if (TREE_CODE (type) == INTEGER_TYPE)
v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
}
- /* For array types hash their domain and the string flag. */
- if (TREE_CODE (type) == ARRAY_TYPE
- && TYPE_DOMAIN (type))
+ /* For array types hash the domain and the string flag. */
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type))
{
v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
v = visit (TYPE_DOMAIN (type), state, v,
v = visit (TYPE_METHOD_BASETYPE (type), state, v,
sccstack, sccstate, sccstate_obstack);
- /* For result types allow mismatch in completeness. */
- if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (type)))
- {
- v = iterative_hash_hashval_t (TREE_CODE (TREE_TYPE (type)), v);
- v = iterative_hash_name
- (TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (type))), v);
- }
- else
- v = visit (TREE_TYPE (type), state, v,
- sccstack, sccstate, sccstate_obstack);
-
+ /* Check result and argument types. */
+ v = visit (TREE_TYPE (type), state, v,
+ sccstack, sccstate, sccstate_obstack);
for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p))
{
- /* For argument types allow mismatch in completeness. */
- if (RECORD_OR_UNION_TYPE_P (TREE_VALUE (p)))
- {
- v = iterative_hash_hashval_t (TREE_CODE (TREE_VALUE (p)), v);
- v = iterative_hash_name
- (TYPE_NAME (TYPE_MAIN_VARIANT (TREE_VALUE (p))), v);
- }
- else
- v = visit (TREE_VALUE (p), state, v,
- sccstack, sccstate, sccstate_obstack);
+ v = visit (TREE_VALUE (p), state, v,
+ sccstack, sccstate, sccstate_obstack);
na++;
}
v = iterative_hash_hashval_t (na, v);
}
- if (TREE_CODE (type) == RECORD_TYPE
- || TREE_CODE (type) == UNION_TYPE
- || TREE_CODE (type) == QUAL_UNION_TYPE)
+ if (RECORD_OR_UNION_TYPE_P (type))
{
unsigned nf;
tree f;
- v = iterative_hash_name (TYPE_NAME (TYPE_MAIN_VARIANT (type)), v);
-
for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f))
{
v = iterative_hash_name (DECL_NAME (f), v);
if (state->low == state->dfsnum)
{
tree x;
+ struct tree_int_map *m;
/* Pop off the SCC and set its hash values. */
- do
+ x = VEC_pop (tree, *sccstack);
+ /* Optimize SCC size one. */
+ if (x == type)
{
- struct sccs *cstate;
- struct tree_int_map *m = ggc_alloc_cleared_tree_int_map ();
- x = VEC_pop (tree, *sccstack);
- cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
- cstate->on_sccstack = false;
+ state->on_sccstack = false;
+ m = ggc_alloc_cleared_tree_int_map ();
m->base.from = x;
- m->to = cstate->u.hash;
+ m->to = v;
slot = htab_find_slot (type_hash_cache, m, INSERT);
gcc_assert (!*slot);
*slot = (void *) m;
}
- while (x != type);
+ else
+ {
+ struct sccs *cstate;
+ unsigned first, i, size, j;
+ struct type_hash_pair *pairs;
+ /* Pop off the SCC and build an array of type, hash pairs. */
+ first = VEC_length (tree, *sccstack) - 1;
+ while (VEC_index (tree, *sccstack, first) != type)
+ --first;
+ size = VEC_length (tree, *sccstack) - first + 1;
+ pairs = XALLOCAVEC (struct type_hash_pair, size);
+ i = 0;
+ cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
+ cstate->on_sccstack = false;
+ pairs[i].type = x;
+ pairs[i].hash = cstate->u.hash;
+ do
+ {
+ x = VEC_pop (tree, *sccstack);
+ cstate = (struct sccs *)*pointer_map_contains (sccstate, x);
+ cstate->on_sccstack = false;
+ ++i;
+ pairs[i].type = x;
+ pairs[i].hash = cstate->u.hash;
+ }
+ while (x != type);
+ gcc_assert (i + 1 == size);
+	  /* Sort the array of type, hash pairs so that when we mix in
+	     all members of the SCC the hash value becomes independent of
+	     the order we visited the SCC.  Disregard hashes equal to
+	     the hash of the type we mix into because we cannot guarantee
+	     a stable sort for those across different TUs.  */
+ qsort (pairs, size, sizeof (struct type_hash_pair),
+ type_hash_pair_compare);
+ for (i = 0; i < size; ++i)
+ {
+ hashval_t hash;
+ m = ggc_alloc_cleared_tree_int_map ();
+ m->base.from = pairs[i].type;
+ hash = pairs[i].hash;
+ /* Skip same hashes. */
+ for (j = i + 1; j < size && pairs[j].hash == pairs[i].hash; ++j)
+ ;
+ for (; j < size; ++j)
+ hash = iterative_hash_hashval_t (pairs[j].hash, hash);
+ for (j = 0; pairs[j].hash != pairs[i].hash; ++j)
+ hash = iterative_hash_hashval_t (pairs[j].hash, hash);
+ m->to = hash;
+ if (pairs[i].type == type)
+ v = hash;
+ slot = htab_find_slot (type_hash_cache, m, INSERT);
+ gcc_assert (!*slot);
+ *slot = (void *) m;
+ }
+ }
}
return iterative_hash_hashval_t (v, val);
return val;
}
+/* Returning a hash value for gimple type TYPE combined with VAL.
-/* Returns nonzero if P1 and P2 are equal. */
-
-static int
-gimple_type_eq (const void *p1, const void *p2)
-{
- const_tree t1 = (const_tree) p1;
- const_tree t2 = (const_tree) p2;
- return gimple_types_compatible_p (CONST_CAST_TREE (t1),
- CONST_CAST_TREE (t2), GTC_MERGE);
-}
-
-
-/* Register type T in the global type table gimple_types.
- If another type T', compatible with T, already existed in
- gimple_types then return T', otherwise return T. This is used by
- LTO to merge identical types read from different TUs. */
+ The hash value returned is equal for types considered compatible
+ by gimple_canonical_types_compatible_p. */
-tree
-gimple_register_type (tree t)
+static hashval_t
+iterative_hash_canonical_type (tree type, hashval_t val)
{
+ hashval_t v;
void **slot;
- gimple_type_leader_entry *leader;
- tree mv_leader = NULL_TREE;
-
- gcc_assert (TYPE_P (t));
+ struct tree_int_map *mp, m;
- if (!gimple_type_leader)
- gimple_type_leader = ggc_alloc_cleared_vec_gimple_type_leader_entry_s
- (GIMPLE_TYPE_LEADER_SIZE);
- /* If we registered this type before return the cached result. */
- leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE];
- if (leader->type == t)
- return leader->leader;
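+ /* If a hash value for TYPE is already cached, combine it with VAL
+ and return. */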
+ m.base.from = type;
+ if ((slot = htab_find_slot (canonical_type_hash_cache, &m, INSERT))
+ && *slot)
+ return iterative_hash_hashval_t (((struct tree_int_map *) *slot)->to, val);
- /* Always register the main variant first. This is important so we
- pick up the non-typedef variants as canonical, otherwise we'll end
- up taking typedef ids for structure tags during comparison. */
- if (TYPE_MAIN_VARIANT (t) != t)
- mv_leader = gimple_register_type (TYPE_MAIN_VARIANT (t));
+ /* Combine a few common features of types so that types are grouped into
+ smaller sets; when searching for existing matching types to merge,
+ only existing types having the same features as the new type will be
+ checked. */
+ v = iterative_hash_hashval_t (TREE_CODE (type), 0);
+ v = iterative_hash_hashval_t (TREE_ADDRESSABLE (type), v);
+ v = iterative_hash_hashval_t (TYPE_ALIGN (type), v);
+ v = iterative_hash_hashval_t (TYPE_MODE (type), v);
- if (gimple_types == NULL)
- gimple_types = htab_create_ggc (16381, gimple_type_hash, gimple_type_eq, 0);
+ /* Incorporate common features of numerical types. */
+ if (INTEGRAL_TYPE_P (type)
+ || SCALAR_FLOAT_TYPE_P (type)
+ || FIXED_POINT_TYPE_P (type)
+ || TREE_CODE (type) == VECTOR_TYPE
+ || TREE_CODE (type) == COMPLEX_TYPE
+ || TREE_CODE (type) == OFFSET_TYPE
+ || POINTER_TYPE_P (type))
+ {
+ v = iterative_hash_hashval_t (TYPE_PRECISION (type), v);
+ v = iterative_hash_hashval_t (TYPE_UNSIGNED (type), v);
+ }
+
+ /* For pointer and reference types, fold in information about the type
+ pointed to but do not recurse to the pointed-to type. */
+ if (POINTER_TYPE_P (type))
+ {
+ v = iterative_hash_hashval_t (TYPE_REF_CAN_ALIAS_ALL (type), v);
+ v = iterative_hash_hashval_t (TYPE_ADDR_SPACE (TREE_TYPE (type)), v);
+ v = iterative_hash_hashval_t (TYPE_RESTRICT (type), v);
+ v = iterative_hash_hashval_t (TREE_CODE (TREE_TYPE (type)), v);
+ }
+
+ /* For integer types hash only the string flag. */
+ if (TREE_CODE (type) == INTEGER_TYPE)
+ v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
+
+ /* For array types hash the domain bounds and the string flag. */
+ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type))
+ {
+ v = iterative_hash_hashval_t (TYPE_STRING_FLAG (type), v);
+ /* OMP lowering can introduce error_mark_node in place of
+ random local decls in types. */
+ if (TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
+ v = iterative_hash_expr (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), v);
+ if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
+ v = iterative_hash_expr (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), v);
+ }
+
+ /* Recurse for aggregates with a single element type. */
+ if (TREE_CODE (type) == ARRAY_TYPE
+ || TREE_CODE (type) == COMPLEX_TYPE
+ || TREE_CODE (type) == VECTOR_TYPE)
+ v = iterative_hash_canonical_type (TREE_TYPE (type), v);
+
+ /* Incorporate function return and argument types. */
+ if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
+ {
+ unsigned na;
+ tree p;
+
+ /* For method types also incorporate their parent class. */
+ if (TREE_CODE (type) == METHOD_TYPE)
+ v = iterative_hash_canonical_type (TYPE_METHOD_BASETYPE (type), v);
+
+ v = iterative_hash_canonical_type (TREE_TYPE (type), v);
+
+ for (p = TYPE_ARG_TYPES (type), na = 0; p; p = TREE_CHAIN (p))
+ {
+ v = iterative_hash_canonical_type (TREE_VALUE (p), v);
+ na++;
+ }
+
+ v = iterative_hash_hashval_t (na, v);
+ }
+
+ if (RECORD_OR_UNION_TYPE_P (type))
+ {
+ unsigned nf;
+ tree f;
+
+ for (f = TYPE_FIELDS (type), nf = 0; f; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ v = iterative_hash_canonical_type (TREE_TYPE (f), v);
+ nf++;
+ }
+ v = iterative_hash_hashval_t (nf, v);
+ }
+
+ /* Cache the just computed hash value. */
+ mp = ggc_alloc_cleared_tree_int_map ();
+ mp->base.from = type;
+ mp->to = v;
+ *slot = (void *) mp;
+
+ return iterative_hash_hashval_t (v, val);
+}
+
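+/* Hash function for the canonical type table. Lazily allocates the
+ hash-value cache and returns the canonical-type hash value for the
+ type pointed to by P. */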
+static hashval_t
+gimple_canonical_type_hash (const void *p)
+{
+ if (canonical_type_hash_cache == NULL)
+ canonical_type_hash_cache = htab_create_ggc (512, tree_int_map_hash,
+ tree_int_map_eq, NULL);
+
+ return iterative_hash_canonical_type (CONST_CAST_TREE ((const_tree) p), 0);
+}
+
+
+/* Returns nonzero if P1 and P2 are equal. */
+
+static int
+gimple_type_eq (const void *p1, const void *p2)
+{
+ const_tree t1 = (const_tree) p1;
+ const_tree t2 = (const_tree) p2;
+ return gimple_types_compatible_p (CONST_CAST_TREE (t1),
+ CONST_CAST_TREE (t2));
+}
+
+
+/* Worker for gimple_register_type.
+ Register type T in the global type table gimple_types.
+ When REGISTERING_MV is false, first recurse for the main variant of T. */
+
+static tree
+gimple_register_type_1 (tree t, bool registering_mv)
+{
+ void **slot;
+ gimple_type_leader_entry *leader;
+
+ /* If we registered this type before return the cached result. */
+ leader = &gimple_type_leader[TYPE_UID (t) % GIMPLE_TYPE_LEADER_SIZE];
+ if (leader->type == t)
+ return leader->leader;
+
+ /* Always register the main variant first. This is important so we
+ pick up the non-typedef variants as canonical, otherwise we'll end
+ up taking typedef ids for structure tags during comparison.
+ It also makes sure that main variants will be merged to main variants.
+ As we are operating on a possibly partially fixed up type graph
+ do not bother to recurse more than once, otherwise we may end up
+ walking in circles.
+ If we are registering a main variant it will either remain its
+ own main variant or it will be merged to something else in which
+ case we do not care for the main variant leader. */
+ if (!registering_mv
+ && TYPE_MAIN_VARIANT (t) != t)
+ gimple_register_type_1 (TYPE_MAIN_VARIANT (t), true);
+
+ /* See if we already have an equivalent type registered. */
slot = htab_find_slot (gimple_types, t, INSERT);
if (*slot
&& *(tree *)slot != t)
{
tree new_type = (tree) *((tree *) slot);
+ leader->type = t;
+ leader->leader = new_type;
+ return new_type;
+ }
- /* Do not merge types with different addressability. */
- gcc_assert (TREE_ADDRESSABLE (t) == TREE_ADDRESSABLE (new_type));
+ /* If not, insert it into the leader cache and the hash table. */
+ leader->type = t;
+ leader->leader = t;
+ *slot = (void *) t;
+ return t;
+}
+
+/* Register type T in the global type table gimple_types.
+ If another type T', compatible with T, already existed in
+ gimple_types then return T', otherwise return T. This is used by
+ LTO to merge identical types read from different TUs. */
+
+tree
+gimple_register_type (tree t)
+{
+ gcc_assert (TYPE_P (t));
+
+ if (!gimple_type_leader)
+ gimple_type_leader = ggc_alloc_cleared_vec_gimple_type_leader_entry_s
+ (GIMPLE_TYPE_LEADER_SIZE);
+
+ if (gimple_types == NULL)
+ gimple_types = htab_create_ggc (16381, gimple_type_hash, gimple_type_eq, 0);
+
+ return gimple_register_type_1 (t, false);
+}
+
+/* The TYPE_CANONICAL merging machinery. It should closely resemble
+ the middle-end types_compatible_p function. It must not claim that
+ two types are different when TBAA should treat them as the same.
+ Canonical types are also used for IL consistency checks via the
+ useless_type_conversion_p predicate, which does not handle all type
+ kinds itself but falls back to pointer comparison of TYPE_CANONICAL
+ for aggregates, for example. */
+
+/* Return true iff T1 and T2 are structurally identical as far as
+ TBAA is concerned. */
+
+static bool
+gimple_canonical_types_compatible_p (tree t1, tree t2)
+{
+ /* Before starting to set up the SCC machinery handle simple cases. */
+
+ /* Check first for the obvious case of pointer identity. */
+ if (t1 == t2)
+ return true;
+
+ /* Check that we have two types to compare. */
+ if (t1 == NULL_TREE || t2 == NULL_TREE)
+ return false;
+
+ /* If the types have been previously registered and found equal
+ they still are. */
+ if (TYPE_CANONICAL (t1)
+ && TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2))
+ return true;
+
+ /* Can't be the same type if the types don't have the same code. */
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return false;
+
+ if (TREE_ADDRESSABLE (t1) != TREE_ADDRESSABLE (t2))
+ return false;
+
+ /* Qualifiers do not matter for canonical type comparison purposes. */
+
+ /* Void types and nullptr types are always the same. */
+ if (TREE_CODE (t1) == VOID_TYPE
+ || TREE_CODE (t1) == NULLPTR_TYPE)
+ return true;
+
+ /* Can't be the same type if they have different alignment, or mode. */
+ if (TYPE_ALIGN (t1) != TYPE_ALIGN (t2)
+ || TYPE_MODE (t1) != TYPE_MODE (t2))
+ return false;
+
+ /* Non-aggregate types can be handled cheaply. */
+ if (INTEGRAL_TYPE_P (t1)
+ || SCALAR_FLOAT_TYPE_P (t1)
+ || FIXED_POINT_TYPE_P (t1)
+ || TREE_CODE (t1) == VECTOR_TYPE
+ || TREE_CODE (t1) == COMPLEX_TYPE
+ || TREE_CODE (t1) == OFFSET_TYPE
+ || POINTER_TYPE_P (t1))
+ {
+ /* Can't be the same type if they have different sign or precision. */
+ if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2)
+ || TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
+ return false;
+
+ if (TREE_CODE (t1) == INTEGER_TYPE
+ && TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2))
+ return false;
- /* If t is not its main variant then make t unreachable from its
- main variant list. Otherwise we'd queue up a lot of duplicates
- there. */
- if (t != TYPE_MAIN_VARIANT (t))
+ /* For canonical type comparisons we do not want to build SCCs
+ so we cannot compare pointed-to types. But we can, for now,
+ require the same pointed-to type kind and match what
+ useless_type_conversion_p would do. */
+ if (POINTER_TYPE_P (t1))
{
- tree tem = TYPE_MAIN_VARIANT (t);
- while (tem && TYPE_NEXT_VARIANT (tem) != t)
- tem = TYPE_NEXT_VARIANT (tem);
- if (tem)
- TYPE_NEXT_VARIANT (tem) = TYPE_NEXT_VARIANT (t);
- TYPE_NEXT_VARIANT (t) = NULL_TREE;
+ /* If the two pointers have different ref-all attributes,
+ they can't be the same type. */
+ if (TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2))
+ return false;
+
+ if (TYPE_ADDR_SPACE (TREE_TYPE (t1))
+ != TYPE_ADDR_SPACE (TREE_TYPE (t2)))
+ return false;
+
+ if (TYPE_RESTRICT (t1) != TYPE_RESTRICT (t2))
+ return false;
+
+ if (TREE_CODE (TREE_TYPE (t1)) != TREE_CODE (TREE_TYPE (t2)))
+ return false;
}
- /* If we are a pointer then remove us from the pointer-to or
- reference-to chain. Otherwise we'd queue up a lot of duplicates
- there. */
- if (TREE_CODE (t) == POINTER_TYPE)
+ /* Tail-recurse to components. */
+ if (TREE_CODE (t1) == VECTOR_TYPE
+ || TREE_CODE (t1) == COMPLEX_TYPE)
+ return gimple_canonical_types_compatible_p (TREE_TYPE (t1),
+ TREE_TYPE (t2));
+
+ return true;
+ }
+
+ /* If their attributes are not the same they can't be the same type. */
+ if (!attribute_list_equal (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2)))
+ return false;
+
+ /* Do type-specific comparisons. */
+ switch (TREE_CODE (t1))
+ {
+ case ARRAY_TYPE:
+ /* Array types are the same if the element types are the same and
+ the number of elements is the same. */
+ if (!gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2))
+ || TYPE_STRING_FLAG (t1) != TYPE_STRING_FLAG (t2)
+ || TYPE_NONALIASED_COMPONENT (t1) != TYPE_NONALIASED_COMPONENT (t2))
+ return false;
+ else
{
- if (TYPE_POINTER_TO (TREE_TYPE (t)) == t)
- TYPE_POINTER_TO (TREE_TYPE (t)) = TYPE_NEXT_PTR_TO (t);
+ tree i1 = TYPE_DOMAIN (t1);
+ tree i2 = TYPE_DOMAIN (t2);
+
+ /* For an incomplete external array, the type domain can be
+ NULL_TREE. Check this condition also. */
+ if (i1 == NULL_TREE && i2 == NULL_TREE)
+ return true;
+ else if (i1 == NULL_TREE || i2 == NULL_TREE)
+ return false;
else
{
- tree tem = TYPE_POINTER_TO (TREE_TYPE (t));
- while (tem && TYPE_NEXT_PTR_TO (tem) != t)
- tem = TYPE_NEXT_PTR_TO (tem);
- if (tem)
- TYPE_NEXT_PTR_TO (tem) = TYPE_NEXT_PTR_TO (t);
+ tree min1 = TYPE_MIN_VALUE (i1);
+ tree min2 = TYPE_MIN_VALUE (i2);
+ tree max1 = TYPE_MAX_VALUE (i1);
+ tree max2 = TYPE_MAX_VALUE (i2);
+
+ /* The minimum/maximum values have to be the same. */
+ if ((min1 == min2
+ || (min1 && min2
+ && ((TREE_CODE (min1) == PLACEHOLDER_EXPR
+ && TREE_CODE (min2) == PLACEHOLDER_EXPR)
+ || operand_equal_p (min1, min2, 0))))
+ && (max1 == max2
+ || (max1 && max2
+ && ((TREE_CODE (max1) == PLACEHOLDER_EXPR
+ && TREE_CODE (max2) == PLACEHOLDER_EXPR)
+ || operand_equal_p (max1, max2, 0)))))
+ return true;
+ else
+ return false;
}
- TYPE_NEXT_PTR_TO (t) = NULL_TREE;
}
- else if (TREE_CODE (t) == REFERENCE_TYPE)
+
+ case METHOD_TYPE:
+ /* Method types should belong to the same class. */
+ if (!gimple_canonical_types_compatible_p
+ (TYPE_METHOD_BASETYPE (t1), TYPE_METHOD_BASETYPE (t2)))
+ return false;
+
+ /* Fallthru */
+
+ case FUNCTION_TYPE:
+ /* Function types are the same if the return type and argument types
+ are the same. */
+ if (!gimple_canonical_types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+
+ if (!comp_type_attributes (t1, t2))
+ return false;
+
+ if (TYPE_ARG_TYPES (t1) == TYPE_ARG_TYPES (t2))
+ return true;
+ else
{
- if (TYPE_REFERENCE_TO (TREE_TYPE (t)) == t)
- TYPE_REFERENCE_TO (TREE_TYPE (t)) = TYPE_NEXT_REF_TO (t);
- else
+ tree parms1, parms2;
+
+ for (parms1 = TYPE_ARG_TYPES (t1), parms2 = TYPE_ARG_TYPES (t2);
+ parms1 && parms2;
+ parms1 = TREE_CHAIN (parms1), parms2 = TREE_CHAIN (parms2))
{
- tree tem = TYPE_REFERENCE_TO (TREE_TYPE (t));
- while (tem && TYPE_NEXT_REF_TO (tem) != t)
- tem = TYPE_NEXT_REF_TO (tem);
- if (tem)
- TYPE_NEXT_REF_TO (tem) = TYPE_NEXT_REF_TO (t);
+ if (!gimple_canonical_types_compatible_p
+ (TREE_VALUE (parms1), TREE_VALUE (parms2)))
+ return false;
}
- TYPE_NEXT_REF_TO (t) = NULL_TREE;
- }
- leader->type = t;
- leader->leader = new_type;
- t = new_type;
- }
- else
- {
- leader->type = t;
- leader->leader = t;
- /* We're the type leader. Make our TYPE_MAIN_VARIANT valid. */
- if (TYPE_MAIN_VARIANT (t) != t
- && TYPE_MAIN_VARIANT (t) != mv_leader)
- {
- /* Remove us from our main variant list as we are not the variant
- leader and the variant leader will change. */
- tree tem = TYPE_MAIN_VARIANT (t);
- while (tem && TYPE_NEXT_VARIANT (tem) != t)
- tem = TYPE_NEXT_VARIANT (tem);
- if (tem)
- TYPE_NEXT_VARIANT (tem) = TYPE_NEXT_VARIANT (t);
- TYPE_NEXT_VARIANT (t) = NULL_TREE;
- /* Adjust our main variant. Linking us into its variant list
- will happen at fixup time. */
- TYPE_MAIN_VARIANT (t) = mv_leader;
+ if (parms1 || parms2)
+ return false;
+
+ return true;
}
- *slot = (void *) t;
- }
- return t;
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree f1, f2;
+
+ /* For aggregate types, all the fields must be the same. */
+ for (f1 = TYPE_FIELDS (t1), f2 = TYPE_FIELDS (t2);
+ f1 || f2;
+ f1 = TREE_CHAIN (f1), f2 = TREE_CHAIN (f2))
+ {
+ /* Skip non-fields. */
+ while (f1 && TREE_CODE (f1) != FIELD_DECL)
+ f1 = TREE_CHAIN (f1);
+ while (f2 && TREE_CODE (f2) != FIELD_DECL)
+ f2 = TREE_CHAIN (f2);
+ if (!f1 || !f2)
+ break;
+ /* The fields must have the same addressability, offset and type. */
+ if (DECL_NONADDRESSABLE_P (f1) != DECL_NONADDRESSABLE_P (f2)
+ || !gimple_compare_field_offset (f1, f2)
+ || !gimple_canonical_types_compatible_p
+ (TREE_TYPE (f1), TREE_TYPE (f2)))
+ return false;
+ }
+
+ /* If one aggregate has more fields than the other, they
+ are not the same. */
+ if (f1 || f2)
+ return false;
+
+ return true;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
}
{
const_tree t1 = (const_tree) p1;
const_tree t2 = (const_tree) p2;
- return gimple_types_compatible_p (CONST_CAST_TREE (t1),
- CONST_CAST_TREE (t2), GTC_DIAG);
+ return gimple_canonical_types_compatible_p (CONST_CAST_TREE (t1),
+ CONST_CAST_TREE (t2));
}
/* Register type T in the global canonical type table gimple_canonical_types.
If another type T', compatible with T, already existed in
gimple_canonical_types then return T', otherwise return T. This is used by
- LTO to merge identical types read from different TUs. */
+ LTO to merge identical types read from different TUs.
+
+ ??? This merging does not exactly match how the tree.c middle-end
+ functions will assign TYPE_CANONICAL when new types are created
+ during optimization (which at least happens for pointer and array
+ types). */
tree
gimple_register_canonical_type (tree t)
{
void **slot;
- tree orig_t = t;
gcc_assert (TYPE_P (t));
if (TYPE_CANONICAL (t))
return TYPE_CANONICAL (t);
- /* Always register the type itself first so that if it turns out
- to be the canonical type it will be the one we merge to as well. */
- t = gimple_register_type (t);
-
- /* Always register the main variant first. This is important so we
- pick up the non-typedef variants as canonical, otherwise we'll end
- up taking typedef ids for structure tags during comparison. */
- if (TYPE_MAIN_VARIANT (t) != t)
- gimple_register_canonical_type (TYPE_MAIN_VARIANT (t));
-
if (gimple_canonical_types == NULL)
- gimple_canonical_types = htab_create_ggc (16381, gimple_type_hash,
+ gimple_canonical_types = htab_create_ggc (16381, gimple_canonical_type_hash,
gimple_canonical_type_eq, 0);
slot = htab_find_slot (gimple_canonical_types, t, INSERT);
*slot = (void *) t;
}
- /* Also cache the canonical type in the non-leaders. */
- TYPE_CANONICAL (orig_t) = t;
-
return t;
}
htab_collisions (gimple_types));
else
fprintf (stderr, "GIMPLE type table is empty\n");
+ if (type_hash_cache)
+ fprintf (stderr, "GIMPLE type hash table: size %ld, %ld elements, "
+ "%ld searches, %ld collisions (ratio: %f)\n",
+ (long) htab_size (type_hash_cache),
+ (long) htab_elements (type_hash_cache),
+ (long) type_hash_cache->searches,
+ (long) type_hash_cache->collisions,
+ htab_collisions (type_hash_cache));
+ else
+ fprintf (stderr, "GIMPLE type hash table is empty\n");
if (gimple_canonical_types)
fprintf (stderr, "GIMPLE canonical type table: size %ld, %ld elements, "
"%ld searches, %ld collisions (ratio: %f)\n",
htab_collisions (gimple_canonical_types));
else
fprintf (stderr, "GIMPLE canonical type table is empty\n");
- if (type_hash_cache)
- fprintf (stderr, "GIMPLE type hash table: size %ld, %ld elements, "
+ if (canonical_type_hash_cache)
+ fprintf (stderr, "GIMPLE canonical type hash table: size %ld, %ld elements, "
"%ld searches, %ld collisions (ratio: %f)\n",
- (long) htab_size (type_hash_cache),
- (long) htab_elements (type_hash_cache),
- (long) type_hash_cache->searches,
- (long) type_hash_cache->collisions,
- htab_collisions (type_hash_cache));
+ (long) htab_size (canonical_type_hash_cache),
+ (long) htab_elements (canonical_type_hash_cache),
+ (long) canonical_type_hash_cache->searches,
+ (long) canonical_type_hash_cache->collisions,
+ htab_collisions (canonical_type_hash_cache));
else
- fprintf (stderr, "GIMPLE type hash table is empty\n");
- if (gtc_visited)
- fprintf (stderr, "GIMPLE type comparison table: size %ld, %ld "
- "elements, %ld searches, %ld collisions (ratio: %f)\n",
- (long) htab_size (gtc_visited),
- (long) htab_elements (gtc_visited),
- (long) gtc_visited->searches,
- (long) gtc_visited->collisions,
- htab_collisions (gtc_visited));
- else
- fprintf (stderr, "GIMPLE type comparison table is empty\n");
+ fprintf (stderr, "GIMPLE canonical type hash table is empty\n");
}
/* Free the gimple type hashtables used for LTO type merging. */
htab_delete (type_hash_cache);
type_hash_cache = NULL;
}
- if (gtc_visited)
+ if (canonical_type_hash_cache)
+ {
+ htab_delete (canonical_type_hash_cache);
+ canonical_type_hash_cache = NULL;
+ }
+ if (type_pair_cache)
{
- htab_delete (gtc_visited);
- obstack_free (>c_ob, NULL);
- gtc_visited = NULL;
+ free (type_pair_cache);
+ type_pair_cache = NULL;
}
gimple_type_leader = NULL;
}
&& TREE_CODE (OBJ_TYPE_REF_OBJECT (rhs)) == ADDR_EXPR)
ret |= visit_addr (stmt, TREE_OPERAND (OBJ_TYPE_REF_OBJECT (rhs),
0), data);
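+ /* The values of a CONSTRUCTOR on the right-hand side may contain
+ ADDR_EXPRs, possibly wrapped in OBJ_TYPE_REFs; visit those
+ addresses as well. */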
+ else if (TREE_CODE (rhs) == CONSTRUCTOR)
+ {
+ unsigned int ix;
+ tree val;
+
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), ix, val)
+ if (TREE_CODE (val) == ADDR_EXPR)
+ ret |= visit_addr (stmt, TREE_OPERAND (val, 0), data);
+ else if (TREE_CODE (val) == OBJ_TYPE_REF
+ && TREE_CODE (OBJ_TYPE_REF_OBJECT (val)) == ADDR_EXPR)
+ ret |= visit_addr (stmt,
+ TREE_OPERAND (OBJ_TYPE_REF_OBJECT (val),
+ 0), data);
+ }
lhs = gimple_assign_lhs (stmt);
if (TREE_CODE (lhs) == TARGET_MEM_REF
&& TREE_CODE (TMR_BASE (lhs)) == ADDR_EXPR)
|| gimple_code (stmt) == GIMPLE_COND))
{
for (i = 0; i < gimple_num_ops (stmt); ++i)
- if (gimple_op (stmt, i)
- && TREE_CODE (gimple_op (stmt, i)) == ADDR_EXPR)
- ret |= visit_addr (stmt, TREE_OPERAND (gimple_op (stmt, i), 0), data);
+ {
+ tree op = gimple_op (stmt, i);
+ if (op == NULL_TREE)
+ ;
+ else if (TREE_CODE (op) == ADDR_EXPR)
+ ret |= visit_addr (stmt, TREE_OPERAND (op, 0), data);
+ /* The rhs1 argument of a COND_EXPR or VCOND_EXPR is a comparison
+ tree with two operands. */
+ else if (i == 1 && COMPARISON_CLASS_P (op))
+ {
+ if (TREE_CODE (TREE_OPERAND (op, 0)) == ADDR_EXPR)
+ ret |= visit_addr (stmt, TREE_OPERAND (TREE_OPERAND (op, 0),
+ 0), data);
+ if (TREE_CODE (TREE_OPERAND (op, 1)) == ADDR_EXPR)
+ ret |= visit_addr (stmt, TREE_OPERAND (TREE_OPERAND (op, 1),
+ 0), data);
+ }
+ }
}
else if (is_gimple_call (stmt))
{
&& DECL_FUNCTION_CODE (fndecl) == code);
}
+/* Return true if STMT clobbers memory. STMT is required to be a
+ GIMPLE_ASM. */
+
+bool
+gimple_asm_clobbers_memory_p (const_gimple stmt)
+{
+ unsigned i;
+
+ for (i = 0; i < gimple_asm_nclobbers (stmt); i++)
+ {
+ tree op = gimple_asm_clobber_op (stmt, i);
+ if (strcmp (TREE_STRING_POINTER (TREE_VALUE (op)), "memory") == 0)
+ return true;
+ }
+
+ return false;
+}
#include "gt-gimple.h"