if (pm)
{
unsigned num_ssas_purged = 0;
- auto_vec<const decl_region *> ssa_name_regs;
- new_state.m_region_model->get_ssa_name_regions_for_current_frame
- (&ssa_name_regs);
- ssa_name_regs.qsort (region::cmp_ptr_ptr);
+ unsigned num_decls_purged = 0;
+ auto_vec<const decl_region *> regs;
+ new_state.m_region_model->get_regions_for_current_frame (&regs);
+ regs.qsort (region::cmp_ptr_ptr);
unsigned i;
const decl_region *reg;
- FOR_EACH_VEC_ELT (ssa_name_regs, i, reg)
+ FOR_EACH_VEC_ELT (regs, i, reg)
{
- tree ssa_name = reg->get_decl ();
- const state_purge_per_ssa_name &per_ssa
- = pm->get_data_for_ssa_name (ssa_name);
- if (!per_ssa.needed_at_point_p (point.get_function_point ()))
+ const tree node = reg->get_decl ();
+ if (TREE_CODE (node) == SSA_NAME)
{
- /* Don't purge bindings of SSA names to svalues
- that have unpurgable sm-state, so that leaks are
- reported at the end of the function, rather than
- at the last place that such an SSA name is referred to.
-
- But do purge them for temporaries (when SSA_NAME_VAR is
- NULL), so that we report for cases where a leak happens when
- a variable is overwritten with another value, so that the leak
- is reported at the point of overwrite, rather than having
- temporaries keep the value reachable until the frame is
- popped. */
- const svalue *sval
- = new_state.m_region_model->get_store_value (reg, NULL);
- if (!new_state.can_purge_p (eg.get_ext_state (), sval)
- && SSA_NAME_VAR (ssa_name))
+ const tree ssa_name = node;
+ const state_purge_per_ssa_name &per_ssa
+ = pm->get_data_for_ssa_name (node);
+ if (!per_ssa.needed_at_point_p (point.get_function_point ()))
{
- /* (currently only state maps can keep things
- alive). */
- if (logger)
- logger->log ("not purging binding for %qE"
- " (used by state map)", ssa_name);
- continue;
+ /* Don't purge bindings of SSA names to svalues
+ that have unpurgable sm-state, so that leaks are
+ reported at the end of the function, rather than
+ at the last place that such an SSA name is referred to.
+
+ But do purge them for temporaries (when SSA_NAME_VAR is
+ NULL), so that we report for cases where a leak happens when
+ a variable is overwritten with another value, so that the leak
+ is reported at the point of overwrite, rather than having
+ temporaries keep the value reachable until the frame is
+ popped. */
+ const svalue *sval
+ = new_state.m_region_model->get_store_value (reg, NULL);
+ if (!new_state.can_purge_p (eg.get_ext_state (), sval)
+ && SSA_NAME_VAR (ssa_name))
+ {
+ /* (currently only state maps can keep things
+ alive). */
+ if (logger)
+ logger->log ("not purging binding for %qE"
+ " (used by state map)", ssa_name);
+ continue;
+ }
+
+ new_state.m_region_model->purge_region (reg);
+ num_ssas_purged++;
}
-
- new_state.m_region_model->purge_region (reg);
- num_ssas_purged++;
+ }
+ else
+ {
+ const tree decl = node;
+ gcc_assert (TREE_CODE (node) == VAR_DECL
+ || TREE_CODE (node) == PARM_DECL
+ || TREE_CODE (node) == RESULT_DECL);
+ if (const state_purge_per_decl *per_decl
+ = pm->get_any_data_for_decl (decl))
+ if (!per_decl->needed_at_point_p (point.get_function_point ()))
+ {
+ /* Don't purge bindings of decls if there are svalues
+ that have unpurgable sm-state within the decl's cluster,
+ so that leaks are reported at the end of the function,
+ rather than at the last place that such a decl is
+ referred to. */
+ if (!new_state.can_purge_base_region_p (eg.get_ext_state (),
+ reg))
+ {
+ /* (currently only state maps can keep things
+ alive). */
+ if (logger)
+ logger->log ("not purging binding for %qE"
+ " (value in binding used by state map)",
+ decl);
+ continue;
+ }
+
+ new_state.m_region_model->purge_region (reg);
+ num_decls_purged++;
+ }
}
}
- if (num_ssas_purged > 0)
+ if (num_ssas_purged > 0 || num_decls_purged > 0)
{
if (logger)
- logger->log ("num_ssas_purged: %i", num_ssas_purged);
+ {
+ logger->log ("num_ssas_purged: %i", num_ssas_purged);
+ logger->log ("num_decls_purged: %i", num_decls_purged);
+ }
impl_region_model_context ctxt (eg, enode_for_diag,
this,
&new_state,
return new_state;
}
+/* Return true if there are no unpurgeable bindings within BASE_REG. */
+
+bool
+program_state::can_purge_base_region_p (const extrinsic_state &ext_state,
+ const region *base_reg) const
+{
+ binding_cluster *cluster
+ = m_region_model->get_store ()->get_cluster (base_reg);
+ if (!cluster)
+ return true;
+
+ for (auto iter : *cluster)
+ {
+ const svalue *sval = iter.second;
+ if (!can_purge_p (ext_state, sval))
+ return false;
+ }
+
+ return true;
+}
+
/* Get a representative tree to use for describing SVAL. */
tree
#include "analyzer/program-point.h"
#include "analyzer/analyzer-logging.h"
#include "analyzer/state-purge.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "gimple-walk.h"
#if ENABLE_ANALYZER
+/* Given NODE at an access, determine if this access is within
+ a decl that could be considered for purging, and if so, return the decl. */
+
+static tree
+get_candidate_for_purging (tree node)
+{
+ tree iter = node;
+ while (1)
+ switch (TREE_CODE (iter))
+ {
+ default:
+ return NULL_TREE;
+
+ case COMPONENT_REF:
+ iter = TREE_OPERAND (iter, 0);
+ continue;
+
+ case VAR_DECL:
+ if (is_global_var (iter))
+ return NULL_TREE;
+ else
+ return iter;
+
+ case PARM_DECL:
+ case RESULT_DECL:
+ return iter;
+ }
+}
+
+/* Class-based handler for walk_stmt_load_store_addr_ops at a particular
+ function_point, for populating the worklists within a state_purge_map. */
+
+class gimple_op_visitor : public log_user
+{
+public:
+ gimple_op_visitor (state_purge_map *map,
+ const function_point &point,
+ function *fun)
+ : log_user (map->get_logger ()),
+ m_map (map),
+ m_point (point),
+ m_fun (fun)
+ {}
+
+ bool on_load (gimple *stmt, tree base, tree op)
+ {
+ LOG_FUNC (get_logger ());
+ if (get_logger ())
+ {
+ pretty_printer pp;
+ pp_gimple_stmt_1 (&pp, stmt, 0, (dump_flags_t)0);
+ log ("on_load: %s; base: %qE, op: %qE",
+ pp_formatted_text (&pp), base, op);
+ }
+ if (tree node = get_candidate_for_purging (base))
+ add_needed (node);
+ return true;
+ }
+
+ bool on_store (gimple *stmt, tree base, tree op)
+ {
+ LOG_FUNC (get_logger ());
+ if (get_logger ())
+ {
+ pretty_printer pp;
+ pp_gimple_stmt_1 (&pp, stmt, 0, (dump_flags_t)0);
+ log ("on_store: %s; base: %qE, op: %qE",
+ pp_formatted_text (&pp), base, op);
+ }
+ return true;
+ }
+
+ bool on_addr (gimple *stmt, tree base, tree op)
+ {
+ LOG_FUNC (get_logger ());
+ if (get_logger ())
+ {
+ pretty_printer pp;
+ pp_gimple_stmt_1 (&pp, stmt, 0, (dump_flags_t)0);
+ log ("on_addr: %s; base: %qE, op: %qE",
+ pp_formatted_text (&pp), base, op);
+ }
+ if (TREE_CODE (op) != ADDR_EXPR)
+ return true;
+ if (tree node = get_candidate_for_purging (base))
+ {
+ add_needed (node);
+ add_pointed_to (node);
+ }
+ return true;
+ }
+
+private:
+ void add_needed (tree decl)
+ {
+ gcc_assert (get_candidate_for_purging (decl) == decl);
+ state_purge_per_decl &data
+ = get_or_create_data_for_decl (decl);
+ data.add_needed_at (m_point);
+
+ /* Handle calls: if we're seeing a use at a call, then add a use at the
+ "after-supernode" point (in case of interprocedural call superedges). */
+ if (m_point.final_stmt_p ())
+ data.add_needed_at (m_point.get_next ());
+ }
+
+ void add_pointed_to (tree decl)
+ {
+ gcc_assert (get_candidate_for_purging (decl) == decl);
+ get_or_create_data_for_decl (decl).add_pointed_to_at (m_point);
+ }
+
+ state_purge_per_decl &
+ get_or_create_data_for_decl (tree decl)
+ {
+ return m_map->get_or_create_data_for_decl (m_fun, decl);
+ }
+
+ state_purge_map *m_map;
+ const function_point &m_point;
+ function *m_fun;
+};
+
+static bool
+my_load_cb (gimple *stmt, tree base, tree op, void *user_data)
+{
+ gimple_op_visitor *x = (gimple_op_visitor *)user_data;
+ return x->on_load (stmt, base, op);
+}
+
+static bool
+my_store_cb (gimple *stmt, tree base, tree op, void *user_data)
+{
+ gimple_op_visitor *x = (gimple_op_visitor *)user_data;
+ return x->on_store (stmt, base, op);
+}
+
+static bool
+my_addr_cb (gimple *stmt, tree base, tree op, void *user_data)
+{
+ gimple_op_visitor *x = (gimple_op_visitor *)user_data;
+ return x->on_addr (stmt, base, op);
+}
+
/* state_purge_map's ctor. Walk all SSA names in all functions, building
- a state_purge_per_ssa_name instance for each. */
+ a state_purge_per_ssa_name instance for each.
+ Also, walk all loads and address-taken ops of local variables, building
+ a state_purge_per_decl as appropriate. */
state_purge_map::state_purge_map (const supergraph &sg,
+ region_model_manager *mgr,
logger *logger)
: log_user (logger), m_sg (sg)
{
if (TREE_CODE (var) == VAR_DECL)
if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
continue;
- m_map.put (name, new state_purge_per_ssa_name (*this, name, fun));
+ m_ssa_map.put (name, new state_purge_per_ssa_name (*this, name, fun));
}
}
+
+ /* Find all uses of local vars.
+ We iterate through all function points, finding loads, stores, and
+ address-taken operations on locals, building a pair of worklists. */
+ for (auto snode : sg.m_nodes)
+ {
+ if (logger)
+ log ("SN: %i", snode->m_index);
+ /* We ignore m_returning_call and phi nodes. */
+ gimple *stmt;
+ unsigned i;
+ FOR_EACH_VEC_ELT (snode->m_stmts, i, stmt)
+ {
+ function_point point (function_point::before_stmt (snode, i));
+ gimple_op_visitor v (this, point, snode->get_function ());
+ walk_stmt_load_store_addr_ops (stmt, &v,
+ my_load_cb, my_store_cb, my_addr_cb);
+ }
+ }
+
+ /* Now iterate through the m_decl_map, processing each pair of worklists. */
+ for (state_purge_map::decl_iterator iter = begin_decls ();
+ iter != end_decls ();
+ ++iter)
+ {
+ state_purge_per_decl *per_decl_data = (*iter).second;
+ per_decl_data->process_worklists (*this, mgr);
+ }
}
/* state_purge_map's dtor. */
state_purge_map::~state_purge_map ()
{
- for (iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
- delete (*iter).second;
+ for (auto iter : m_ssa_map)
+ delete iter.second;
+ for (auto iter : m_decl_map)
+ delete iter.second;
}
+/* Get the state_purge_per_decl for local DECL within FUN, creating it
+ if necessary. */
+
+state_purge_per_decl &
+state_purge_map::get_or_create_data_for_decl (function *fun, tree decl)
+{
+ if (state_purge_per_decl **slot
+ = const_cast <decl_map_t&> (m_decl_map).get (decl))
+ return **slot;
+ state_purge_per_decl *result = new state_purge_per_decl (*this, decl, fun);
+ m_decl_map.put (decl, result);
+ return *result;
+}
+
+/* class state_purge_per_ssa_name : public state_purge_per_tree. */
+
/* state_purge_per_ssa_name's ctor.
Locate all uses of VAR within FUN.
state_purge_per_ssa_name::state_purge_per_ssa_name (const state_purge_map &map,
tree name,
function *fun)
-: m_points_needing_name (), m_name (name), m_fun (fun)
+: state_purge_per_tree (fun), m_points_needing_name (), m_name (name)
{
LOG_FUNC (map.get_logger ());
logger->end_log_line ();
}
- gcc_assert (point.get_function () == m_fun);
+ gcc_assert (point.get_function () == get_function ());
if (point.get_from_edge ())
gcc_assert (point.get_from_edge ()->get_kind () == SUPEREDGE_CFG_EDGE);
}
}
+/* class state_purge_per_decl : public state_purge_per_tree. */
+
+/* state_purge_per_decl's ctor. */
+
+state_purge_per_decl::state_purge_per_decl (const state_purge_map &map,
+ tree decl,
+ function *fun)
+: state_purge_per_tree (fun),
+ m_decl (decl)
+{
+ /* The RESULT_DECL is always needed at the end of its function. */
+ if (TREE_CODE (decl) == RESULT_DECL)
+ {
+ supernode *exit_snode = map.get_sg ().get_node_for_function_exit (fun);
+ add_needed_at (function_point::after_supernode (exit_snode));
+ }
+}
+
+/* Mark the value of the decl (or a subvalue within it) as being needed
+ at POINT. */
+
+void
+state_purge_per_decl::add_needed_at (const function_point &point)
+{
+ m_points_needing_decl.add (point);
+}
+
+/* Mark that a pointer to the decl (or a region within it) is taken
+ at POINT. */
+
+void
+state_purge_per_decl::add_pointed_to_at (const function_point &point)
+{
+ m_points_taking_address.add (point);
+}
+
+/* Process the worklists for this decl:
+ (a) walk backwards from points where we know the value of the decl
+ is needed, marking points until we get to a stmt that fully overwrites
+ the decl.
+ (b) walk forwards from points where the address of the decl is taken,
+ marking points as potentially needing the value of the decl. */
+
+void
+state_purge_per_decl::process_worklists (const state_purge_map &map,
+ region_model_manager *mgr)
+{
+ logger *logger = map.get_logger ();
+ LOG_SCOPE (logger);
+ if (logger)
+ logger->log ("decl: %qE within %qD", m_decl, get_fndecl ());
+
+ /* Worklist for walking backwards from uses. */
+ {
+ auto_vec<function_point> worklist;
+ point_set_t seen;
+
+ /* Add all uses of the decl to the worklist. */
+ for (auto iter : m_points_needing_decl)
+ worklist.safe_push (iter);
+
+ region_model model (mgr);
+ model.push_frame (get_function (), NULL, NULL);
+
+ /* Process worklist by walking backwards until we reach a stmt
+ that fully overwrites the decl. */
+ {
+ log_scope s (logger, "processing worklist");
+ while (worklist.length () > 0)
+ {
+ function_point point = worklist.pop ();
+ process_point_backwards (point, &worklist, &seen, map, model);
+ }
+ }
+ }
+
+ /* Worklist for walking forwards from address-taken points. */
+ {
+ auto_vec<function_point> worklist;
+ point_set_t seen;
+
+ /* Add all points where the address of the decl is taken
+ to the worklist. */
+ for (auto iter : m_points_taking_address)
+ {
+ worklist.safe_push (iter);
+
+ /* Add to m_points_needing_decl (now that we traversed
+ it above for the backward worklist). */
+ m_points_needing_decl.add (iter);
+ }
+
+ /* Process worklist by walking forwards. */
+ {
+ log_scope s (logger, "processing worklist");
+ while (worklist.length () > 0)
+ {
+ function_point point = worklist.pop ();
+ process_point_forwards (point, &worklist, &seen, map);
+ }
+ }
+ }
+}
+
+/* Add POINT to *WORKLIST if the point is not already in *SEEN.
+ Add newly seen points to *SEEN and to m_points_needing_decl. */
+
+void
+state_purge_per_decl::add_to_worklist (const function_point &point,
+ auto_vec<function_point> *worklist,
+ point_set_t *seen,
+ logger *logger)
+{
+ LOG_FUNC (logger);
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("point: '");
+ point.print (logger->get_printer (), format (false));
+ logger->log_partial ("' for worklist for %qE", m_decl);
+ logger->end_log_line ();
+ }
+
+ gcc_assert (point.get_function () == get_function ());
+ if (point.get_from_edge ())
+ gcc_assert (point.get_from_edge ()->get_kind () == SUPEREDGE_CFG_EDGE);
+
+ if (seen->contains (point))
+ {
+ if (logger)
+ logger->log ("already seen for %qE", m_decl);
+ }
+ else
+ {
+ if (logger)
+ logger->log ("not seen; adding to worklist for %qE", m_decl);
+ m_points_needing_decl.add (point);
+ seen->add (point);
+ worklist->safe_push (point);
+ }
+}
+
+/* Determine if REG_A and REG_B express writing to exactly the same
+ set of bits.
+ For example, when "s.field" is the only field of "s", and there's no
+ padding, this should return true. */
+
+static bool
+same_binding_p (const region *reg_a, const region *reg_b,
+ store_manager *store_mgr)
+{
+ if (reg_a->get_base_region () != reg_b->get_base_region ())
+ return false;
+ const binding_key *bind_key_a = binding_key::make (store_mgr, reg_a);
+ const binding_key *bind_key_b = binding_key::make (store_mgr, reg_b);
+ return bind_key_a == bind_key_b;
+}
+
+/* Return true if STMT fully overwrites DECL. */
+
+static bool
+fully_overwrites_p (const gimple *stmt, tree decl,
+ const region_model &model)
+{
+ if (tree lhs = gimple_get_lhs (stmt))
+ {
+ /* Determine if LHS fully overwrites DECL.
+ We can't just check for equality; consider the case of
+ "s.field = EXPR;" where the stmt writes to the only field
+ of "s", and there's no padding. */
+ const region *lhs_reg = model.get_lvalue (lhs, NULL);
+ const region *decl_reg = model.get_lvalue (decl, NULL);
+ if (same_binding_p (lhs_reg, decl_reg,
+ model.get_manager ()->get_store_manager ()))
+ return true;
+ }
+ return false;
+}
+
+/* Process POINT, popped from *WORKLIST.
+ Iterate over predecessors of POINT, adding to *WORKLIST and *SEEN,
+ until we get to a stmt that fully overwrites the decl. */
+
+void
+state_purge_per_decl::
+process_point_backwards (const function_point &point,
+ auto_vec<function_point> *worklist,
+ point_set_t *seen,
+ const state_purge_map &map,
+ const region_model &model)
+{
+ logger *logger = map.get_logger ();
+ LOG_FUNC (logger);
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("considering point: '");
+ point.print (logger->get_printer (), format (false));
+ logger->log_partial ("' for %qE", m_decl);
+ logger->end_log_line ();
+ }
+ const supernode *snode = point.get_supernode ();
+
+ switch (point.get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+
+ case PK_ORIGIN:
+ break;
+
+ case PK_BEFORE_SUPERNODE:
+ {
+ /* Add given pred to worklist. */
+ if (point.get_from_edge ())
+ {
+ gcc_assert (point.get_from_edge ()->m_src);
+ add_to_worklist
+ (function_point::after_supernode (point.get_from_edge ()->m_src),
+ worklist, seen, logger);
+ }
+ else
+ {
+ /* Add any intraprocedural edge for a call. */
+ if (snode->m_returning_call)
+ {
+ gcall *returning_call = snode->m_returning_call;
+ cgraph_edge *cedge
+ = supergraph_call_edge (snode->m_fun,
+ returning_call);
+ if (cedge)
+ {
+ superedge *sedge
+ = map.get_sg ().get_intraprocedural_edge_for_call (cedge);
+ gcc_assert (sedge);
+ add_to_worklist
+ (function_point::after_supernode (sedge->m_src),
+ worklist, seen, logger);
+ }
+ else
+ {
+ supernode *callernode
+ = map.get_sg ().get_supernode_for_stmt (returning_call);
+
+ gcc_assert (callernode);
+ add_to_worklist
+ (function_point::after_supernode (callernode),
+ worklist, seen, logger);
+ }
+ }
+ }
+ }
+ break;
+
+ case PK_BEFORE_STMT:
+ {
+ /* This is somewhat equivalent to how the SSA case handles
+ def-stmts. */
+ if (fully_overwrites_p (point.get_stmt (), m_decl, model))
+ {
+ if (logger)
+ logger->log ("stmt fully overwrites %qE; terminating", m_decl);
+ return;
+ }
+ if (point.get_stmt_idx () > 0)
+ add_to_worklist (function_point::before_stmt
+ (snode, point.get_stmt_idx () - 1),
+ worklist, seen, logger);
+ else
+ {
+ /* Add before_supernode to worklist. This captures the in-edge,
+ so we have to do it once per in-edge. */
+ unsigned i;
+ superedge *pred;
+ FOR_EACH_VEC_ELT (snode->m_preds, i, pred)
+ add_to_worklist (function_point::before_supernode (snode,
+ pred),
+ worklist, seen, logger);
+ }
+ }
+ break;
+
+ case PK_AFTER_SUPERNODE:
+ {
+ if (snode->m_stmts.length ())
+ add_to_worklist
+ (function_point::before_stmt (snode,
+ snode->m_stmts.length () - 1),
+ worklist, seen, logger);
+ else
+ {
+ /* Add before_supernode to worklist. This captures the in-edge,
+ so we have to do it once per in-edge. */
+ unsigned i;
+ superedge *pred;
+ FOR_EACH_VEC_ELT (snode->m_preds, i, pred)
+ add_to_worklist (function_point::before_supernode (snode,
+ pred),
+ worklist, seen, logger);
+ }
+ }
+ break;
+ }
+
+}
+
+/* Process POINT, popped from *WORKLIST.
+ Iterate over successors of POINT, adding to *WORKLIST and *SEEN. */
+
+void
+state_purge_per_decl::
+process_point_forwards (const function_point &point,
+ auto_vec<function_point> *worklist,
+ point_set_t *seen,
+ const state_purge_map &map)
+{
+ logger *logger = map.get_logger ();
+ LOG_FUNC (logger);
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("considering point: '");
+ point.print (logger->get_printer (), format (false));
+ logger->log_partial ("' for %qE", m_decl);
+ logger->end_log_line ();
+ }
+ const supernode *snode = point.get_supernode ();
+
+ switch (point.get_kind ())
+ {
+ default:
+ case PK_ORIGIN:
+ gcc_unreachable ();
+
+ case PK_BEFORE_SUPERNODE:
+ {
+ function_point next = point.get_next ();
+ add_to_worklist (next, worklist, seen, logger);
+ }
+ break;
+
+ case PK_BEFORE_STMT:
+ {
+ /* Perhaps we should stop at a clobber of the decl,
+ but that ought to purge state for us explicitly. */
+ function_point next = point.get_next ();
+ add_to_worklist (next, worklist, seen, logger);
+ }
+ break;
+
+ case PK_AFTER_SUPERNODE:
+ {
+ /* Look at interprocedural out-edges. */
+ unsigned i;
+ superedge *succ;
+ FOR_EACH_VEC_ELT (snode->m_succs, i, succ)
+ {
+ enum edge_kind kind = succ->get_kind ();
+ if (kind == SUPEREDGE_CFG_EDGE
+ || kind == SUPEREDGE_INTRAPROCEDURAL_CALL)
+ add_to_worklist (function_point::before_supernode (succ->m_dest,
+ succ),
+ worklist, seen, logger);
+ }
+ }
+ break;
+ }
+}
+
+/* Return true if the decl is needed at POINT. */
+
+bool
+state_purge_per_decl::needed_at_point_p (const function_point &point) const
+{
+ return const_cast <point_set_t &> (m_points_needing_decl).contains (point);
+}
+
/* class state_purge_annotator : public dot_annotator. */
/* Implementation of dot_annotator::add_node_annotations vfunc for
print_needed (gv, before_stmt, true);
}
-/* Get the ssa names needed and not-needed at POINT, and print them to GV.
+/* Get the decls and SSA names needed and not-needed at POINT, and
+ print them to GV.
If WITHIN_TABLE is true, print them within <TR> elements. */
void
{
auto_vec<tree> needed;
auto_vec<tree> not_needed;
- for (state_purge_map::iterator iter = m_map->begin ();
- iter != m_map->end ();
+ for (state_purge_map::ssa_iterator iter = m_map->begin_ssas ();
+ iter != m_map->end_ssas ();
++iter)
{
tree name = (*iter).first;
not_needed.safe_push (name);
}
}
+ for (state_purge_map::decl_iterator iter = m_map->begin_decls ();
+ iter != m_map->end_decls ();
+ ++iter)
+ {
+ tree decl = (*iter).first;
+ state_purge_per_decl *per_decl_data = (*iter).second;
+ if (per_decl_data->get_function () == point.get_function ())
+ {
+ if (per_decl_data->needed_at_point_p (point))
+ needed.safe_push (decl);
+ else
+ not_needed.safe_push (decl);
+ }
+ }
print_vec_of_names (gv, "needed here", needed, within_table);
print_vec_of_names (gv, "not needed here", not_needed, within_table);
namespace ana {
-/* The result of analyzing which SSA names can be purged from state at
+/* The result of analyzing which decls and SSA names can be purged from state at
different points in the program, so that we can simplify program_state
objects, in the hope of reducing state-blowup. */
class state_purge_map : public log_user
{
public:
- typedef ordered_hash_map<tree, state_purge_per_ssa_name *> map_t;
- typedef map_t::iterator iterator;
+ typedef ordered_hash_map<tree, state_purge_per_ssa_name *> ssa_map_t;
+ typedef ssa_map_t::iterator ssa_iterator;
- state_purge_map (const supergraph &sg, logger *logger);
+ typedef ordered_hash_map<tree, state_purge_per_decl *> decl_map_t;
+ typedef decl_map_t::iterator decl_iterator;
+
+ state_purge_map (const supergraph &sg,
+ region_model_manager *mgr,
+ logger *logger);
~state_purge_map ();
const state_purge_per_ssa_name &get_data_for_ssa_name (tree name) const
gcc_assert (!VAR_DECL_IS_VIRTUAL_OPERAND (var));
state_purge_per_ssa_name **slot
- = const_cast <map_t&> (m_map).get (name);
+ = const_cast <ssa_map_t&> (m_ssa_map).get (name);
return **slot;
}
+ const state_purge_per_decl *get_any_data_for_decl (tree decl) const
+ {
+ gcc_assert (TREE_CODE (decl) == VAR_DECL
+ || TREE_CODE (decl) == PARM_DECL
+ || TREE_CODE (decl) == RESULT_DECL);
+ if (state_purge_per_decl **slot
+ = const_cast <decl_map_t&> (m_decl_map).get (decl))
+ return *slot;
+ else
+ return NULL;
+ }
+
+ state_purge_per_decl &get_or_create_data_for_decl (function *fun, tree decl);
+
const supergraph &get_sg () const { return m_sg; }
- iterator begin () const { return m_map.begin (); }
- iterator end () const { return m_map.end (); }
+ ssa_iterator begin_ssas () const { return m_ssa_map.begin (); }
+ ssa_iterator end_ssas () const { return m_ssa_map.end (); }
+
+ decl_iterator begin_decls () const { return m_decl_map.begin (); }
+ decl_iterator end_decls () const { return m_decl_map.end (); }
private:
DISABLE_COPY_AND_ASSIGN (state_purge_map);
const supergraph &m_sg;
- map_t m_map;
+ ssa_map_t m_ssa_map;
+ decl_map_t m_decl_map;
+};
+
+ /* Base class for state_purge_per_ssa_name and state_purge_per_decl. */
+
+class state_purge_per_tree
+{
+public:
+ function *get_function () const { return m_fun; }
+ tree get_fndecl () const { return m_fun->decl; }
+
+protected:
+ typedef hash_set<function_point> point_set_t;
+
+ state_purge_per_tree (function *fun)
+ : m_fun (fun)
+ {
+ }
+
+private:
+ function *m_fun;
};
/* The part of a state_purge_map relating to a specific SSA name.
their successor states, so that we can simplify program_state objects,
in the hope of reducing state-blowup. */
-class state_purge_per_ssa_name
+class state_purge_per_ssa_name : public state_purge_per_tree
{
public:
state_purge_per_ssa_name (const state_purge_map &map,
bool needed_at_point_p (const function_point &point) const;
- function *get_function () const { return m_fun; }
-
private:
static function_point before_use_stmt (const state_purge_map &map,
const gimple *use_stmt);
auto_vec<function_point> *worklist,
const state_purge_map &map);
- typedef hash_set<function_point> point_set_t;
point_set_t m_points_needing_name;
tree m_name;
- function *m_fun;
+};
+
+/* The part of a state_purge_map relating to a specific decl.
+
+ Analogous to state_purge_per_ssa_name, but for local decls.
+
+ This is more involved than the SSA name case, because we also need
+ to handle pointers and components. */
+
+class state_purge_per_decl : public state_purge_per_tree
+{
+public:
+ state_purge_per_decl (const state_purge_map &map,
+ tree decl,
+ function *fun);
+
+ bool needed_at_point_p (const function_point &point) const;
+
+ void add_needed_at (const function_point &point);
+ void add_pointed_to_at (const function_point &point);
+ void process_worklists (const state_purge_map &map,
+ region_model_manager *mgr);
+
+private:
+ static function_point before_use_stmt (const state_purge_map &map,
+ const gimple *use_stmt);
+
+ void add_to_worklist (const function_point &point,
+ auto_vec<function_point> *worklist,
+ point_set_t *seen,
+ logger *logger);
+
+ void process_point_backwards (const function_point &point,
+ auto_vec<function_point> *worklist,
+ point_set_t *seen,
+ const state_purge_map &map,
+ const region_model &model);
+ void process_point_forwards (const function_point &point,
+ auto_vec<function_point> *worklist,
+ point_set_t *seen,
+ const state_purge_map &map);
+
+ point_set_t m_points_needing_decl;
+ point_set_t m_points_taking_address;
+ tree m_decl;
};
/* Subclass of dot_annotator for use by -fdump-analyzer-state-purge.