/* Vectorizer
- Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
- Free Software Foundation, Inc.
+ Copyright (C) 2003-2015 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
-#include "tm.h"
-#include "ggc.h"
+#include "backend.h"
+#include "predict.h"
#include "tree.h"
+#include "gimple.h"
+#include "hard-reg-set.h"
+#include "ssa.h"
+#include "alias.h"
+#include "fold-const.h"
+#include "stor-layout.h"
#include "tree-pretty-print.h"
-#include "tree-flow.h"
+#include "internal-fn.h"
+#include "gimple-iterator.h"
+#include "gimple-walk.h"
+#include "cgraph.h"
+#include "tree-ssa-loop-manip.h"
+#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-pass.h"
+#include "tree-ssa-propagate.h"
+#include "dbgcnt.h"
+#include "gimple-fold.h"
+#include "tree-scalar-evolution.h"
+
/* Loop or bb location. */
-LOC vect_location;
+source_location vect_location;
/* Vector mapping GIMPLE stmt to stmt_vec_info. */
-vec<vec_void_p> stmt_vec_info_vec;
+vec<stmt_vec_info> stmt_vec_info_vec;
+\f
+/* For mapping simduid to vectorization factor. */
+
+struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
+{
+ unsigned int simduid;
+ int vf;
+
+ /* hash_table support. */
+ static inline hashval_t hash (const simduid_to_vf *);
+ static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
+};
+
+inline hashval_t
+simduid_to_vf::hash (const simduid_to_vf *p)
+{
+ return p->simduid;
+}
+
+inline int
+simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
+{
+ return p1->simduid == p2->simduid;
+}
+
+/* This hash maps the OMP simd array to the corresponding simduid used
+   to index into it, like so:
+
+ _7 = GOMP_SIMD_LANE (simduid.0)
+ ...
+ ...
+ D.1737[_7] = stuff;
+
+
+ This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
+ simduid.0. */
+
+struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
+{
+ tree decl;
+ unsigned int simduid;
+
+ /* hash_table support. */
+ static inline hashval_t hash (const simd_array_to_simduid *);
+ static inline int equal (const simd_array_to_simduid *,
+ const simd_array_to_simduid *);
+};
+
+inline hashval_t
+simd_array_to_simduid::hash (const simd_array_to_simduid *p)
+{
+ return DECL_UID (p->decl);
+}
+
+inline int
+simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
+ const simd_array_to_simduid *p2)
+{
+ return p1->decl == p2->decl;
+}
+
+/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
+ into their corresponding constants and remove
+ IFN_GOMP_SIMD_ORDERED_{START,END}. */
+
+static void
+adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
+{
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ gimple_stmt_iterator i;
+
+ for (i = gsi_start_bb (bb); !gsi_end_p (i); )
+ {
+ unsigned int vf = 1;
+ enum internal_fn ifn;
+ gimple *stmt = gsi_stmt (i);
+ tree t;
+ if (!is_gimple_call (stmt)
+ || !gimple_call_internal_p (stmt))
+ {
+ gsi_next (&i);
+ continue;
+ }
+ ifn = gimple_call_internal_fn (stmt);
+ switch (ifn)
+ {
+ case IFN_GOMP_SIMD_LANE:
+ case IFN_GOMP_SIMD_VF:
+ case IFN_GOMP_SIMD_LAST_LANE:
+ break;
+ case IFN_GOMP_SIMD_ORDERED_START:
+ case IFN_GOMP_SIMD_ORDERED_END:
+ gsi_remove (&i, true);
+ unlink_stmt_vdef (stmt);
+ continue;
+ default:
+ gsi_next (&i);
+ continue;
+ }
+ tree arg = gimple_call_arg (stmt, 0);
+ gcc_assert (arg != NULL_TREE);
+ gcc_assert (TREE_CODE (arg) == SSA_NAME);
+ simduid_to_vf *p = NULL, data;
+ data.simduid = DECL_UID (SSA_NAME_VAR (arg));
+ if (htab)
+ {
+ p = htab->find (&data);
+ if (p)
+ vf = p->vf;
+ }
+ switch (ifn)
+ {
+ case IFN_GOMP_SIMD_VF:
+ t = build_int_cst (unsigned_type_node, vf);
+ break;
+ case IFN_GOMP_SIMD_LANE:
+ t = build_int_cst (unsigned_type_node, 0);
+ break;
+ case IFN_GOMP_SIMD_LAST_LANE:
+ t = gimple_call_arg (stmt, 1);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ update_call_from_tree (&i, t);
+ gsi_next (&i);
+ }
+ }
+}
+
+/* Helper structure for note_simd_array_uses. */
+
+struct note_simd_array_uses_struct
+{
+ hash_table<simd_array_to_simduid> **htab;
+ unsigned int simduid;
+};
+
+/* Callback for note_simd_array_uses, called through walk_gimple_op. */
+static tree
+note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
+{
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ struct note_simd_array_uses_struct *ns
+ = (struct note_simd_array_uses_struct *) wi->info;
+
+ if (TYPE_P (*tp))
+ *walk_subtrees = 0;
+ else if (VAR_P (*tp)
+ && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
+ && DECL_CONTEXT (*tp) == current_function_decl)
+ {
+ simd_array_to_simduid data;
+ if (!*ns->htab)
+ *ns->htab = new hash_table<simd_array_to_simduid> (15);
+ data.decl = *tp;
+ data.simduid = ns->simduid;
+ simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
+ if (*slot == NULL)
+ {
+ simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
+ *p = data;
+ *slot = p;
+ }
+ else if ((*slot)->simduid != ns->simduid)
+ (*slot)->simduid = -1U;
+ *walk_subtrees = 0;
+ }
+ return NULL_TREE;
+}
+
+/* Find "omp simd array" temporaries and map them to corresponding
+ simduid. */
+
+static void
+note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
+{
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+ struct walk_stmt_info wi;
+ struct note_simd_array_uses_struct ns;
+
+ memset (&wi, 0, sizeof (wi));
+ wi.info = &ns;
+ ns.htab = htab;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
+ continue;
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_GOMP_SIMD_LANE:
+ case IFN_GOMP_SIMD_VF:
+ case IFN_GOMP_SIMD_LAST_LANE:
+ break;
+ default:
+ continue;
+ }
+ tree lhs = gimple_call_lhs (stmt);
+ if (lhs == NULL_TREE)
+ continue;
+ imm_use_iterator use_iter;
+ gimple *use_stmt;
+ ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
+ FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
+ if (!is_gimple_debug (use_stmt))
+ walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
+ }
+}
+
+/* Shrink arrays with "omp simd array" attribute to the corresponding
+ vectorization factor. */
+
+static void
+shrink_simd_arrays
+ (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
+ hash_table<simduid_to_vf> *simduid_to_vf_htab)
+{
+ for (hash_table<simd_array_to_simduid>::iterator iter
+ = simd_array_to_simduid_htab->begin ();
+ iter != simd_array_to_simduid_htab->end (); ++iter)
+ if ((*iter)->simduid != -1U)
+ {
+ tree decl = (*iter)->decl;
+ int vf = 1;
+ if (simduid_to_vf_htab)
+ {
+ simduid_to_vf *p = NULL, data;
+ data.simduid = (*iter)->simduid;
+ p = simduid_to_vf_htab->find (&data);
+ if (p)
+ vf = p->vf;
+ }
+ tree atype
+ = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
+ TREE_TYPE (decl) = atype;
+ relayout_decl (decl);
+ }
+
+ delete simd_array_to_simduid_htab;
+}
\f
+/* A helper function to free data refs. */
+
+void
+vect_destroy_datarefs (vec_info *vinfo)
+{
+ struct data_reference *dr;
+ unsigned int i;
+
+ FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr)
+ if (dr->aux)
+ {
+ free (dr->aux);
+ dr->aux = NULL;
+ }
+
+ free_data_refs (vinfo->datarefs);
+}
+
+
+/* If LOOP has been versioned during ifcvt, return the internal call
+ guarding it. */
+
+static gimple *
+vect_loop_vectorized_call (struct loop *loop)
+{
+ basic_block bb = loop_preheader_edge (loop)->src;
+ gimple *g;
+ do
+ {
+ g = last_stmt (bb);
+ if (g)
+ break;
+ if (!single_pred_p (bb))
+ break;
+ bb = single_pred (bb);
+ }
+ while (1);
+ if (g && gimple_code (g) == GIMPLE_COND)
+ {
+ gimple_stmt_iterator gsi = gsi_for_stmt (g);
+ gsi_prev (&gsi);
+ if (!gsi_end_p (gsi))
+ {
+ g = gsi_stmt (gsi);
+ if (is_gimple_call (g)
+ && gimple_call_internal_p (g)
+ && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
+ && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
+ || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
+ return g;
+ }
+ }
+ return NULL;
+}
+
+/* Fold LOOP_VECTORIZED internal call G to VALUE and
+   update any immediate uses of its LHS.  */
+
+static void
+fold_loop_vectorized_call (gimple *g, tree value)
+{
+ tree lhs = gimple_call_lhs (g);
+ use_operand_p use_p;
+ imm_use_iterator iter;
+ gimple *use_stmt;
+ gimple_stmt_iterator gsi = gsi_for_stmt (g);
+
+ update_call_from_tree (&gsi, value);
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+ {
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, value);
+ update_stmt (use_stmt);
+ }
+}
+/* Set the uids of all the statements in basic blocks inside loop
+ represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
+ call guarding the loop which has been if converted. */
+static void
+set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
+{
+ tree arg = gimple_call_arg (loop_vectorized_call, 1);
+ basic_block *bbs;
+ unsigned int i;
+ struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
+
+ LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
+ gcc_checking_assert (vect_loop_vectorized_call
+ (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
+ == loop_vectorized_call);
+ bbs = get_loop_body (scalar_loop);
+ for (i = 0; i < scalar_loop->num_nodes; i++)
+ {
+ basic_block bb = bbs[i];
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *phi = gsi_stmt (gsi);
+ gimple_set_uid (phi, 0);
+ }
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ gimple_set_uid (stmt, 0);
+ }
+ }
+ free (bbs);
+}
+
/* Function vectorize_loops.
Entry point to loop vectorization phase. */
unsigned int i;
unsigned int num_vectorized_loops = 0;
unsigned int vect_loops_num;
- loop_iterator li;
struct loop *loop;
+ hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
+ hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
+ bool any_ifcvt_loops = false;
+ unsigned ret = 0;
- vect_loops_num = number_of_loops ();
+ vect_loops_num = number_of_loops (cfun);
/* Bail out if there are no loops. */
if (vect_loops_num <= 1)
return 0;
+ if (cfun->has_simduid_loops)
+ note_simd_array_uses (&simd_array_to_simduid_htab);
+
init_stmt_vec_info_vec ();
/* ----------- Analyze loops. ----------- */
/* If some loop was duplicated, it gets bigger number
than all previously defined loops. This fact allows us to run
only over initial loops skipping newly generated ones. */
- FOR_EACH_LOOP (li, loop, 0)
- if (optimize_loop_nest_for_speed_p (loop))
+ FOR_EACH_LOOP (loop, 0)
+ if (loop->dont_vectorize)
+ any_ifcvt_loops = true;
+ else if ((flag_tree_loop_vectorize
+ && optimize_loop_nest_for_speed_p (loop))
+ || loop->force_vectorize)
{
loop_vec_info loop_vinfo;
vect_location = find_loop_location (loop);
- if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
+ if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
&& dump_enabled_p ())
- dump_printf (MSG_ALL, "\nAnalyzing loop at %s:%d\n",
- LOC_FILE (vect_location), LOC_LINE (vect_location));
+ dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
+ LOCATION_FILE (vect_location),
+ LOCATION_LINE (vect_location));
loop_vinfo = vect_analyze_loop (loop);
loop->aux = loop_vinfo;
if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
continue;
- if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
+ if (!dbg_cnt (vect_loop))
+ break;
+
+ gimple *loop_vectorized_call = vect_loop_vectorized_call (loop);
+ if (loop_vectorized_call)
+ set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
+ if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
&& dump_enabled_p ())
- dump_printf (MSG_ALL, "\n\nVectorizing loop at %s:%d\n",
- LOC_FILE (vect_location), LOC_LINE (vect_location));
+ dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+ "loop vectorized\n");
vect_transform_loop (loop_vinfo);
num_vectorized_loops++;
+ /* Now that the loop has been vectorized, allow it to be unrolled
+ etc. */
+ loop->force_vectorize = false;
+
+ if (loop->simduid)
+ {
+ simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
+ if (!simduid_to_vf_htab)
+ simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
+ simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
+ simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
+ *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
+ = simduid_to_vf_data;
+ }
+
+ if (loop_vectorized_call)
+ {
+ fold_loop_vectorized_call (loop_vectorized_call, boolean_true_node);
+ ret |= TODO_cleanup_cfg;
+ }
}
- vect_location = UNKNOWN_LOC;
+ vect_location = UNKNOWN_LOCATION;
statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
if (dump_enabled_p ()
|| (num_vectorized_loops > 0 && dump_enabled_p ()))
- dump_printf_loc (MSG_ALL, vect_location,
+ dump_printf_loc (MSG_NOTE, vect_location,
"vectorized %u loops in function.\n",
num_vectorized_loops);
/* ----------- Finalize. ----------- */
+ if (any_ifcvt_loops)
+ for (i = 1; i < vect_loops_num; i++)
+ {
+ loop = get_loop (cfun, i);
+ if (loop && loop->dont_vectorize)
+ {
+ gimple *g = vect_loop_vectorized_call (loop);
+ if (g)
+ {
+ fold_loop_vectorized_call (g, boolean_false_node);
+ ret |= TODO_cleanup_cfg;
+ }
+ }
+ }
+
for (i = 1; i < vect_loops_num; i++)
{
loop_vec_info loop_vinfo;
- loop = get_loop (i);
+ loop = get_loop (cfun, i);
if (!loop)
continue;
loop_vinfo = (loop_vec_info) loop->aux;
free_stmt_vec_info_vec ();
- return num_vectorized_loops > 0 ? TODO_cleanup_cfg : 0;
+ /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
+ if (cfun->has_simduid_loops)
+ adjust_simduid_builtins (simduid_to_vf_htab);
+
+  /* Shrink any "omp simd array" temporary arrays to the
+     actual vectorization factors.  */
+ if (simd_array_to_simduid_htab)
+ shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
+ delete simduid_to_vf_htab;
+ cfun->has_simduid_loops = false;
+
+ if (num_vectorized_loops > 0)
+ {
+ /* If we vectorized any loop only virtual SSA form needs to be updated.
+ ??? Also while we try hard to update loop-closed SSA form we fail
+ to properly do this in some corner-cases (see PR56286). */
+ rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
+ return TODO_cleanup_cfg;
+ }
+
+ return ret;
+}
+
+
+/* Entry point to the simduid cleanup pass. */
+
+namespace {
+
+const pass_data pass_data_simduid_cleanup =
+{
+ GIMPLE_PASS, /* type */
+ "simduid", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ ( PROP_ssa | PROP_cfg ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_simduid_cleanup : public gimple_opt_pass
+{
+public:
+ pass_simduid_cleanup (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
+ virtual bool gate (function *fun) { return fun->has_simduid_loops; }
+ virtual unsigned int execute (function *);
+
+}; // class pass_simduid_cleanup
+
+unsigned int
+pass_simduid_cleanup::execute (function *fun)
+{
+ hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
+
+ note_simd_array_uses (&simd_array_to_simduid_htab);
+
+ /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
+ adjust_simduid_builtins (NULL);
+
+  /* Shrink any "omp simd array" temporary arrays to the
+     actual vectorization factors.  */
+ if (simd_array_to_simduid_htab)
+ shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
+ fun->has_simduid_loops = false;
+ return 0;
+}
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_simduid_cleanup (gcc::context *ctxt)
+{
+ return new pass_simduid_cleanup (ctxt);
}
/* Entry point to basic block SLP phase. */
-static unsigned int
-execute_vect_slp (void)
+namespace {
+
+const pass_data pass_data_slp_vectorize =
+{
+ GIMPLE_PASS, /* type */
+ "slp", /* name */
+ OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
+ TV_TREE_SLP_VECTORIZATION, /* tv_id */
+ ( PROP_ssa | PROP_cfg ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa, /* todo_flags_finish */
+};
+
+class pass_slp_vectorize : public gimple_opt_pass
+{
+public:
+ pass_slp_vectorize (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
+ virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
+ virtual unsigned int execute (function *);
+
+}; // class pass_slp_vectorize
+
+unsigned int
+pass_slp_vectorize::execute (function *fun)
{
basic_block bb;
+ bool in_loop_pipeline = scev_initialized_p ();
+ if (!in_loop_pipeline)
+ {
+ loop_optimizer_init (LOOPS_NORMAL);
+ scev_initialize ();
+ }
+
init_stmt_vec_info_vec ();
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, fun)
{
vect_location = find_bb_location (bb);
if (vect_slp_analyze_bb (bb))
{
+ if (!dbg_cnt (vect_slp))
+ break;
+
vect_slp_transform_bb (bb);
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
- "basic block vectorized using SLP\n");
+ "basic block vectorized\n");
}
}
free_stmt_vec_info_vec ();
+
+ if (!in_loop_pipeline)
+ {
+ scev_finalize ();
+ loop_optimizer_finalize ();
+ }
+
return 0;
}
-static bool
-gate_vect_slp (void)
-{
- /* Apply SLP either if the vectorizer is on and the user didn't specify
- whether to run SLP or not, or if the SLP flag was set by the user. */
- return ((flag_tree_vectorize != 0 && flag_tree_slp_vectorize != 0)
- || flag_tree_slp_vectorize == 1);
-}
+} // anon namespace
-struct gimple_opt_pass pass_slp_vectorize =
+gimple_opt_pass *
+make_pass_slp_vectorize (gcc::context *ctxt)
{
- {
- GIMPLE_PASS,
- "slp", /* name */
- OPTGROUP_LOOP
- | OPTGROUP_VEC, /* optinfo_flags */
- gate_vect_slp, /* gate */
- execute_vect_slp, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_TREE_SLP_VECTORIZATION, /* tv_id */
- PROP_ssa | PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_ggc_collect
- | TODO_verify_ssa
- | TODO_update_ssa
- | TODO_verify_stmts /* todo_flags_finish */
- }
-};
+ return new pass_slp_vectorize (ctxt);
+}
/* Increase alignment of global arrays to improve vectorization potential.
static unsigned int
increase_alignment (void)
{
- struct varpool_node *vnode;
+ varpool_node *vnode;
+
+ vect_location = UNKNOWN_LOCATION;
/* Increase the alignment of all global arrays for vectorization. */
FOR_EACH_DEFINED_VARIABLE (vnode)
{
- tree vectype, decl = vnode->symbol.decl;
+ tree vectype, decl = vnode->decl;
tree t;
unsigned int alignment;
- t = TREE_TYPE(decl);
+ t = TREE_TYPE (decl);
if (TREE_CODE (t) != ARRAY_TYPE)
continue;
vectype = get_vectype_for_scalar_type (strip_array_types (t));
if (vect_can_force_dr_alignment_p (decl, alignment))
{
- DECL_ALIGN (decl) = TYPE_ALIGN (vectype);
- DECL_USER_ALIGN (decl) = 1;
+ vnode->increase_alignment (TYPE_ALIGN (vectype));
dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
dump_printf (MSG_NOTE, "\n");
}
-static bool
-gate_increase_alignment (void)
+namespace {
+
+const pass_data pass_data_ipa_increase_alignment =
{
- return flag_section_anchors && flag_tree_vectorize;
-}
+ SIMPLE_IPA_PASS, /* type */
+ "increase_alignment", /* name */
+ OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
+ TV_IPA_OPT, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+class pass_ipa_increase_alignment : public simple_ipa_opt_pass
+{
+public:
+ pass_ipa_increase_alignment (gcc::context *ctxt)
+ : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate (function *)
+ {
+ return flag_section_anchors && flag_tree_loop_vectorize;
+ }
+
+ virtual unsigned int execute (function *) { return increase_alignment (); }
-struct simple_ipa_opt_pass pass_ipa_increase_alignment =
+}; // class pass_ipa_increase_alignment
+
+} // anon namespace
+
+simple_ipa_opt_pass *
+make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
- {
- SIMPLE_IPA_PASS,
- "increase_alignment", /* name */
- OPTGROUP_LOOP
- | OPTGROUP_VEC, /* optinfo_flags */
- gate_increase_alignment, /* gate */
- increase_alignment, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_IPA_OPT, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
+ return new pass_ipa_increase_alignment (ctxt);
+}