/* Lower vector operations to scalar operations.
- Copyright (C) 2004-2014 Free Software Foundation, Inc.
+ Copyright (C) 2004-2018 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
#include "tree.h"
+#include "gimple.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "expmed.h"
+#include "optabs-tree.h"
+#include "diagnostic.h"
+#include "fold-const.h"
#include "stor-layout.h"
-#include "tm.h"
#include "langhooks.h"
-#include "predict.h"
-#include "vec.h"
-#include "hashtab.h"
-#include "hash-set.h"
-#include "machmode.h"
-#include "hard-reg-set.h"
-#include "input.h"
-#include "function.h"
-#include "dominance.h"
-#include "cfg.h"
-#include "basic-block.h"
-#include "tree-ssa-alias.h"
-#include "internal-fn.h"
#include "tree-eh.h"
-#include "gimple-expr.h"
-#include "is-a.h"
-#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
-#include "gimple-ssa.h"
+#include "gimplify.h"
#include "tree-cfg.h"
-#include "stringpool.h"
-#include "tree-ssanames.h"
-#include "tree-iterator.h"
-#include "tree-pass.h"
-#include "flags.h"
-#include "diagnostic.h"
-#include "target.h"
-
-/* Need to include rtl.h, expr.h, etc. for optabs. */
-#include "expr.h"
-#include "insn-codes.h"
-#include "optabs.h"
+#include "tree-vector-builder.h"
+#include "vec-perm-indices.h"
static void expand_vector_operations_1 (gimple_stmt_iterator *);
+/* Return the number of elements in a vector type TYPE that we have
+ already decided needs to be expanded piecewise. We don't support
+ this kind of expansion for variable-length vectors, since we should
+ always check for target support before introducing uses of those. */
+static unsigned int
+nunits_for_known_piecewise_op (const_tree type)
+{
+ return TYPE_VECTOR_SUBPARTS (type).to_constant ();
+}
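+
+/* For example, this returns 4 for a V4SI vector. A variable-length
+ type such as an SVE VNx4SI must never reach this function: its
+ subpart count is not a compile-time constant, so to_constant ()
+ would assert, which is the invariant the comment above relies on. */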
+
+/* Return true if TYPE1 has more elements than TYPE2, where either
+ type may be a vector or a scalar. */
+
+static inline bool
+subparts_gt (tree type1, tree type2)
+{
+ poly_uint64 n1 = VECTOR_TYPE_P (type1) ? TYPE_VECTOR_SUBPARTS (type1) : 1;
+ poly_uint64 n2 = VECTOR_TYPE_P (type2) ? TYPE_VECTOR_SUBPARTS (type2) : 1;
+ return known_gt (n1, n2);
+}
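+
+/* For instance, subparts_gt is true for a V8HI vector type against a
+ V4SI one (8 elements vs. 4), and for V4SI against a scalar int
+ (4 vs. 1). For variable-length vectors, known_gt only returns
+ true when the relation holds for every possible runtime length. */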
/* Build a constant of type TYPE, made of VALUE's bits replicated
every TYPE_SIZE (INNER_TYPE) bits to fit TYPE's precision. */
}
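+
+/* Illustrative sketch of the replication above: with a 32-bit TYPE,
+ an 8-bit INNER_TYPE and VALUE 0x4f, the result is 0x4f4f4f4f,
+ i.e. roughly
+
+ word = 0;
+ for (i = 0; i < 32; i += 8)
+ word |= (value & 0xff) << i;
+
+ These replicated masks drive the word-wide bit-twiddling
+ expansions of addition and negation below. */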
typedef tree (*elem_op_func) (gimple_stmt_iterator *,
- tree, tree, tree, tree, tree, enum tree_code);
+ tree, tree, tree, tree, tree, enum tree_code,
+ tree);
static inline tree
tree_vec_extract (gimple_stmt_iterator *gsi, tree type,
tree t, tree bitsize, tree bitpos)
{
+ if (TREE_CODE (t) == SSA_NAME)
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (t);
+ if (is_gimple_assign (def_stmt)
+ && (gimple_assign_rhs_code (def_stmt) == VECTOR_CST
+ || (bitpos
+ && gimple_assign_rhs_code (def_stmt) == CONSTRUCTOR)))
+ t = gimple_assign_rhs1 (def_stmt);
+ }
if (bitpos)
- return gimplify_build3 (gsi, BIT_FIELD_REF, type, t, bitsize, bitpos);
+ {
+ if (TREE_CODE (type) == BOOLEAN_TYPE)
+ {
+ tree itype
+ = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 0);
+ tree field = gimplify_build3 (gsi, BIT_FIELD_REF, itype, t,
+ bitsize, bitpos);
+ return gimplify_build2 (gsi, NE_EXPR, type, field,
+ build_zero_cst (itype));
+ }
+ else
+ return gimplify_build3 (gsi, BIT_FIELD_REF, type, t, bitsize, bitpos);
+ }
else
return gimplify_build1 (gsi, VIEW_CONVERT_EXPR, type, t);
}
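+
+/* For instance (GIMPLE sketch, SSA names invented), extracting the
+ 8-bit element at index 2 of a boolean vector held in an integer
+ bitmask emits
+
+ _1 = BIT_FIELD_REF <mask_2, 8, 16>;
+ _3 = _1 != 0;
+
+ since a BIT_FIELD_REF of BOOLEAN_TYPE itself would not match the
+ boolean's precision. */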
static tree
do_unop (gimple_stmt_iterator *gsi, tree inner_type, tree a,
tree b ATTRIBUTE_UNUSED, tree bitpos, tree bitsize,
- enum tree_code code)
+ enum tree_code code, tree type ATTRIBUTE_UNUSED)
{
a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
return gimplify_build1 (gsi, code, inner_type, a);
static tree
do_binop (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
- tree bitpos, tree bitsize, enum tree_code code)
+ tree bitpos, tree bitsize, enum tree_code code,
+ tree type ATTRIBUTE_UNUSED)
{
if (TREE_CODE (TREE_TYPE (a)) == VECTOR_TYPE)
a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
size equal to the size of INNER_TYPE. */
static tree
do_compare (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
- tree bitpos, tree bitsize, enum tree_code code)
+ tree bitpos, tree bitsize, enum tree_code code, tree type)
{
- tree comp_type;
+ tree stype = TREE_TYPE (type);
+ tree cst_false = build_zero_cst (stype);
+ tree cst_true = build_all_ones_cst (stype);
+ tree cmp;
a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
- comp_type = build_nonstandard_integer_type
- (GET_MODE_BITSIZE (TYPE_MODE (inner_type)), 0);
-
- return gimplify_build3 (gsi, COND_EXPR, comp_type,
- fold_build2 (code, boolean_type_node, a, b),
- build_int_cst (comp_type, -1),
- build_int_cst (comp_type, 0));
+ cmp = build2 (code, boolean_type_node, a, b);
+ return gimplify_build3 (gsi, COND_EXPR, stype, cmp, cst_true, cst_false);
}
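+
+/* Each lane of a piecewise comparison thus computes the scalar
+ equivalent of (sketch) a_i <op> b_i ? -1 : 0, preserving the
+ all-ones/all-zeros lane values that vector comparisons produce. */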
/* Expand vector addition to scalars. This does bit twiddling
static tree
do_plus_minus (gimple_stmt_iterator *gsi, tree word_type, tree a, tree b,
tree bitpos ATTRIBUTE_UNUSED, tree bitsize ATTRIBUTE_UNUSED,
- enum tree_code code)
+ enum tree_code code, tree type ATTRIBUTE_UNUSED)
{
tree inner_type = TREE_TYPE (TREE_TYPE (a));
unsigned HOST_WIDE_INT max;
do_negate (gimple_stmt_iterator *gsi, tree word_type, tree b,
tree unused ATTRIBUTE_UNUSED, tree bitpos ATTRIBUTE_UNUSED,
tree bitsize ATTRIBUTE_UNUSED,
- enum tree_code code ATTRIBUTE_UNUSED)
+ enum tree_code code ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED)
{
tree inner_type = TREE_TYPE (TREE_TYPE (b));
HOST_WIDE_INT max;
vec<constructor_elt, va_gc> *v;
tree part_width = TYPE_SIZE (inner_type);
tree index = bitsize_int (0);
- int nunits = TYPE_VECTOR_SUBPARTS (type);
+ int nunits = nunits_for_known_piecewise_op (type);
int delta = tree_to_uhwi (part_width)
/ tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)));
int i;
for (i = 0; i < nunits;
i += delta, index = int_const_binop (PLUS_EXPR, index, part_width))
{
- tree result = f (gsi, inner_type, a, b, index, part_width, code);
+ tree result = f (gsi, inner_type, a, b, index, part_width, code, type);
constructor_elt ce = {NULL_TREE, result};
v->quick_push (ce);
}
enum tree_code code)
{
tree result, compute_type;
- machine_mode mode;
int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD;
location_t loc = gimple_location (gsi_stmt (*gsi));
else
{
/* Use a single scalar operation with a mode no wider than word_mode. */
- mode = mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), MODE_INT, 0);
+ scalar_int_mode mode
+ = int_mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), 0).require ();
compute_type = lang_hooks.types.type_for_mode (mode, 1);
- result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code);
+ result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code, type);
warning_at (loc, OPT_Wvector_operation_performance,
"vector operation will be expanded with a "
"single scalar operation");
if (INTEGRAL_TYPE_P (TREE_TYPE (type))
&& parts_per_word >= 4
- && TYPE_VECTOR_SUBPARTS (type) >= 4)
+ && nunits_for_known_piecewise_op (type) >= 4)
return expand_vector_parallel (gsi, f_parallel,
type, a, b, code);
else
tree op1, enum tree_code code)
{
tree t;
- if (! expand_vec_cond_expr_p (type, TREE_TYPE (op0)))
+ if (!expand_vec_cmp_expr_p (TREE_TYPE (op0), type, code)
+ && !expand_vec_cond_expr_p (type, TREE_TYPE (op0), code))
t = expand_vector_piecewise (gsi, do_compare, type,
TREE_TYPE (TREE_TYPE (op0)), op0, op1, code);
else
add_rshift (gimple_stmt_iterator *gsi, tree type, tree op0, int *shiftcnts)
{
optab op;
- unsigned int i, nunits = TYPE_VECTOR_SUBPARTS (type);
+ unsigned int i, nunits = nunits_for_known_piecewise_op (type);
bool scalar_shift = true;
for (i = 1; i < nunits; i++)
if (op != unknown_optab
&& optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
{
- tree *vec = XALLOCAVEC (tree, nunits);
+ tree_vector_builder vec (type, nunits, 1);
for (i = 0; i < nunits; i++)
- vec[i] = build_int_cst (TREE_TYPE (type), shiftcnts[i]);
- return gimplify_build2 (gsi, RSHIFT_EXPR, type, op0,
- build_vector (type, vec));
+ vec.quick_push (build_int_cst (TREE_TYPE (type), shiftcnts[i]));
+ return gimplify_build2 (gsi, RSHIFT_EXPR, type, op0, vec.build ());
}
return NULL_TREE;
bool has_vector_shift = true;
int mode = -1, this_mode;
int pre_shift = -1, post_shift;
- unsigned int nunits = TYPE_VECTOR_SUBPARTS (type);
+ unsigned int nunits = nunits_for_known_piecewise_op (type);
int *shifts = XALLOCAVEC (int, nunits * 4);
int *pre_shifts = shifts + nunits;
int *post_shifts = pre_shifts + nunits;
unsigned int i;
signop sign_p = TYPE_SIGN (TREE_TYPE (type));
unsigned HOST_WIDE_INT mask = GET_MODE_MASK (TYPE_MODE (TREE_TYPE (type)));
- tree *vec;
tree cur_op, mulcst, tem;
optab op;
unsigned HOST_WIDE_INT mh;
unsigned HOST_WIDE_INT d = TREE_INT_CST_LOW (cst) & mask;
- if (d >= ((unsigned HOST_WIDE_INT) 1 << (prec - 1)))
+ if (d >= (HOST_WIDE_INT_1U << (prec - 1)))
/* FIXME: Can transform this into op0 >= op1 ? 1 : 0. */
return NULL_TREE;
|| (!has_vector_shift && pre_shift != -1))
{
if (has_vector_shift)
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
else if (pre_shift == -1)
{
unsigned int j;
/* n rem d = n rem -d */
if (code == TRUNC_MOD_EXPR && d < 0)
d = abs_d;
- else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (prec - 1))
+ else if (abs_d == HOST_WIDE_INT_1U << (prec - 1))
{
/* This case is not handled correctly below. */
mode = -2;
choose_multiplier (abs_d, prec, prec - 1, &ml,
&post_shift, &dummy_int);
- if (ml >= (unsigned HOST_WIDE_INT) 1 << (prec - 1))
+ if (ml >= HOST_WIDE_INT_1U << (prec - 1))
{
this_mode = 4 + (d < 0);
- ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+ ml |= HOST_WIDE_INT_M1U << (prec - 1);
}
else
this_mode = 2 + (d < 0);
mode = -2;
}
- vec = XALLOCAVEC (tree, nunits);
-
if (use_pow2)
{
tree addend = NULL_TREE;
}
}
if (addend == NULL_TREE
- && expand_vec_cond_expr_p (type, type))
+ && expand_vec_cond_expr_p (type, type, LT_EXPR))
{
- tree zero, cst, cond;
- gimple stmt;
+ tree zero, cst, cond, mask_type;
+ gimple *stmt;
+ mask_type = build_same_sized_truth_vector_type (type);
zero = build_zero_cst (type);
- cond = build2 (LT_EXPR, type, op0, zero);
+ cond = build2 (LT_EXPR, mask_type, op0, zero);
+ tree_vector_builder vec (type, nunits, 1);
for (i = 0; i < nunits; i++)
- vec[i] = build_int_cst (TREE_TYPE (type),
- ((unsigned HOST_WIDE_INT) 1
- << shifts[i]) - 1);
- cst = build_vector (type, vec);
- addend = make_ssa_name (type, NULL);
- stmt = gimple_build_assign_with_ops (VEC_COND_EXPR, addend,
- cond, cst, zero);
+ vec.quick_push (build_int_cst (TREE_TYPE (type),
+ (HOST_WIDE_INT_1U
+ << shifts[i]) - 1));
+ cst = vec.build ();
+ addend = make_ssa_name (type);
+ stmt = gimple_build_assign (addend, VEC_COND_EXPR, cond,
+ cst, zero);
gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
}
}
else
{
tree mask;
+ tree_vector_builder vec (type, nunits, 1);
for (i = 0; i < nunits; i++)
- vec[i] = build_int_cst (TREE_TYPE (type),
- ((unsigned HOST_WIDE_INT) 1
- << shifts[i]) - 1);
- mask = build_vector (type, vec);
+ vec.quick_push (build_int_cst (TREE_TYPE (type),
+ (HOST_WIDE_INT_1U
+ << shifts[i]) - 1));
+ mask = vec.build ();
op = optab_for_tree_code (BIT_AND_EXPR, type, optab_default);
if (op != unknown_optab
&& optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
return NULL_TREE;
}
+ tree_vector_builder vec (type, nunits, 1);
for (i = 0; i < nunits; i++)
- vec[i] = build_int_cst (TREE_TYPE (type), mulc[i]);
- mulcst = build_vector (type, vec);
+ vec.quick_push (build_int_cst (TREE_TYPE (type), mulc[i]));
+ mulcst = vec.build ();
cur_op = gimplify_build2 (gsi, MULT_HIGHPART_EXPR, type, cur_op, mulcst);
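+
+ /* Worked example (editor's illustration): an unsigned 16-bit
+ division by 3 becomes a MULT_HIGHPART_EXPR by the magic
+ constant 0xAAAB from choose_multiplier followed by a post-shift,
+ i.e. q = (uint16_t) ((x * 0xAAABu) >> 16) >> 1. */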
static void
expand_vector_condition (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
tree type = gimple_expr_type (stmt);
tree a = gimple_assign_rhs1 (stmt);
tree a1 = a;
- tree a2;
+ tree a2 = NULL_TREE;
bool a_is_comparison = false;
tree b = gimple_assign_rhs2 (stmt);
tree c = gimple_assign_rhs3 (stmt);
tree comp_inner_type = cond_type;
tree width = TYPE_SIZE (inner_type);
tree index = bitsize_int (0);
- int nunits = TYPE_VECTOR_SUBPARTS (type);
+ tree comp_width = width;
+ tree comp_index = index;
int i;
location_t loc = gimple_location (gsi_stmt (*gsi));
a1 = TREE_OPERAND (a, 0);
a2 = TREE_OPERAND (a, 1);
comp_inner_type = TREE_TYPE (TREE_TYPE (a1));
+ comp_width = TYPE_SIZE (comp_inner_type);
}
- if (expand_vec_cond_expr_p (type, TREE_TYPE (a1)))
+ if (expand_vec_cond_expr_p (type, TREE_TYPE (a1), TREE_CODE (a)))
return;
+ /* Handle vector boolean types with bitmasks. If there is a comparison
+ and we can expand the comparison into the vector boolean bitmask,
+ or otherwise if it is compatible with type, we can transform
+ vbfld_1 = x_2 < y_3 ? vbfld_4 : vbfld_5;
+ into
+ tmp_6 = x_2 < y_3;
+ tmp_7 = tmp_6 & vbfld_4;
+ tmp_8 = ~tmp_6;
+ tmp_9 = tmp_8 & vbfld_5;
+ vbfld_1 = tmp_7 | tmp_9;
+ Similarly for vbfld_10 instead of x_2 < y_3. */
+ if (VECTOR_BOOLEAN_TYPE_P (type)
+ && SCALAR_INT_MODE_P (TYPE_MODE (type))
+ && known_lt (GET_MODE_BITSIZE (TYPE_MODE (type)),
+ TYPE_VECTOR_SUBPARTS (type)
+ * GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (type))))
+ && (a_is_comparison
+ ? useless_type_conversion_p (type, TREE_TYPE (a))
+ : expand_vec_cmp_expr_p (TREE_TYPE (a1), type, TREE_CODE (a))))
+ {
+ if (a_is_comparison)
+ a = gimplify_build2 (gsi, TREE_CODE (a), type, a1, a2);
+ a1 = gimplify_build2 (gsi, BIT_AND_EXPR, type, a, b);
+ a2 = gimplify_build1 (gsi, BIT_NOT_EXPR, type, a);
+ a2 = gimplify_build2 (gsi, BIT_AND_EXPR, type, a2, c);
+ a = gimplify_build2 (gsi, BIT_IOR_EXPR, type, a1, a2);
+ gimple_assign_set_rhs_from_tree (gsi, a);
+ update_stmt (gsi_stmt (*gsi));
+ return;
+ }
+
/* TODO: try and find a smaller vector type. */
warning_at (loc, OPT_Wvector_operation_performance,
"vector condition will be expanded piecewise");
+ int nunits = nunits_for_known_piecewise_op (type);
vec_alloc (v, nunits);
- for (i = 0; i < nunits;
- i++, index = int_const_binop (PLUS_EXPR, index, width))
+ for (i = 0; i < nunits; i++)
{
tree aa, result;
tree bb = tree_vec_extract (gsi, inner_type, b, width, index);
tree cc = tree_vec_extract (gsi, inner_type, c, width, index);
if (a_is_comparison)
{
- tree aa1 = tree_vec_extract (gsi, comp_inner_type, a1, width, index);
- tree aa2 = tree_vec_extract (gsi, comp_inner_type, a2, width, index);
- aa = build2 (TREE_CODE (a), cond_type, aa1, aa2);
+ tree aa1 = tree_vec_extract (gsi, comp_inner_type, a1,
+ comp_width, comp_index);
+ tree aa2 = tree_vec_extract (gsi, comp_inner_type, a2,
+ comp_width, comp_index);
+ aa = fold_build2 (TREE_CODE (a), cond_type, aa1, aa2);
}
else
aa = tree_vec_extract (gsi, cond_type, a, width, index);
result = gimplify_build3 (gsi, COND_EXPR, inner_type, aa, bb, cc);
constructor_elt ce = {NULL_TREE, result};
v->quick_push (ce);
+ index = int_const_binop (PLUS_EXPR, index, width);
+ if (width == comp_width)
+ comp_index = index;
+ else
+ comp_index = int_const_binop (PLUS_EXPR, comp_index, comp_width);
}
constr = build_constructor (type, v);
static tree
expand_vector_operation (gimple_stmt_iterator *gsi, tree type, tree compute_type,
- gimple assign, enum tree_code code)
+ gassign *assign, enum tree_code code)
{
machine_mode compute_mode = TYPE_MODE (compute_type);
{
case PLUS_EXPR:
case MINUS_EXPR:
- if (!TYPE_OVERFLOW_TRAPS (type))
+ if (ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type))
return expand_vector_addition (gsi, do_binop, do_plus_minus, type,
gimple_assign_rhs1 (assign),
gimple_assign_rhs2 (assign), code);
break;
case NEGATE_EXPR:
- if (!TYPE_OVERFLOW_TRAPS (type))
+ if (ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type))
return expand_vector_addition (gsi, do_unop, do_negate, type,
gimple_assign_rhs1 (assign),
NULL_TREE, code);
static void
optimize_vector_constructor (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
tree type = TREE_TYPE (rhs);
unsigned int i, j, nelts = TYPE_VECTOR_SUBPARTS (type);
bool all_same = true;
constructor_elt *elt;
- tree *cst;
- gimple g;
+ gimple *g;
tree base = NULL_TREE;
optab op;
}
if (all_same)
return;
- cst = XALLOCAVEC (tree, nelts);
+ tree_vector_builder cst (type, nelts, 1);
for (i = 0; i < nelts; i++)
{
- tree this_base = CONSTRUCTOR_ELT (rhs, i)->value;;
- cst[i] = build_zero_cst (TREE_TYPE (base));
+ tree this_base = CONSTRUCTOR_ELT (rhs, i)->value;
+ tree elt = build_zero_cst (TREE_TYPE (base));
while (this_base != base)
{
g = SSA_NAME_DEF_STMT (this_base);
- cst[i] = fold_binary (PLUS_EXPR, TREE_TYPE (base),
- cst[i], gimple_assign_rhs2 (g));
- if (cst[i] == NULL_TREE
- || TREE_CODE (cst[i]) != INTEGER_CST
- || TREE_OVERFLOW (cst[i]))
+ elt = fold_binary (PLUS_EXPR, TREE_TYPE (base),
+ elt, gimple_assign_rhs2 (g));
+ if (elt == NULL_TREE
+ || TREE_CODE (elt) != INTEGER_CST
+ || TREE_OVERFLOW (elt))
return;
this_base = gimple_assign_rhs1 (g);
}
+ cst.quick_push (elt);
}
for (i = 0; i < nelts; i++)
CONSTRUCTOR_ELT (rhs, i)->value = base;
- g = gimple_build_assign (make_ssa_name (type, NULL), rhs);
+ g = gimple_build_assign (make_ssa_name (type), rhs);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
- g = gimple_build_assign_with_ops (PLUS_EXPR, lhs, gimple_assign_lhs (g),
- build_vector (type, cst));
+ g = gimple_build_assign (lhs, PLUS_EXPR, gimple_assign_lhs (g),
+ cst.build ());
gsi_replace (gsi, g, false);
}
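+
+/* For instance (illustrative), the constructor
+
+ v_1 = { x_2, x_2 + 1, x_2 + 2, x_2 + 3 };
+
+ is rewritten above as a splat plus a constant vector:
+
+ t_3 = { x_2, x_2, x_2, x_2 };
+ v_1 = t_3 + { 0, 1, 2, 3 };
+
+ which targets with a cheap vector-duplicate instruction prefer. */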
\f
{
machine_mode inner_mode = TYPE_MODE (type);
machine_mode best_mode = VOIDmode, mode;
- int best_nunits = 0;
+ poly_int64 best_nunits = 0;
if (SCALAR_FLOAT_MODE_P (inner_mode))
mode = MIN_MODE_VECTOR_FLOAT;
else
mode = MIN_MODE_VECTOR_INT;
- for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
+ FOR_EACH_MODE_FROM (mode, mode)
if (GET_MODE_INNER (mode) == inner_mode
- && GET_MODE_NUNITS (mode) > best_nunits
+ && maybe_gt (GET_MODE_NUNITS (mode), best_nunits)
&& optab_handler (op, mode) != CODE_FOR_nothing)
best_mode = mode, best_nunits = GET_MODE_NUNITS (mode);
vector_element (gimple_stmt_iterator *gsi, tree vect, tree idx, tree *ptmpvec)
{
tree vect_type, vect_elt_type;
- gimple asgn;
+ gimple *asgn;
tree tmpvec;
tree arraytype;
bool need_asgn = true;
vect_type = TREE_TYPE (vect);
vect_elt_type = TREE_TYPE (vect_type);
- elements = TYPE_VECTOR_SUBPARTS (vect_type);
+ elements = nunits_for_known_piecewise_op (vect_type);
if (TREE_CODE (idx) == INTEGER_CST)
{
simplification by looking through intermediate vector results. */
if (TREE_CODE (vect) == SSA_NAME)
{
- gimple def_stmt = SSA_NAME_DEF_STMT (vect);
+ gimple *def_stmt = SSA_NAME_DEF_STMT (vect);
if (is_gimple_assign (def_stmt)
&& (gimple_assign_rhs_code (def_stmt) == VECTOR_CST
|| gimple_assign_rhs_code (def_stmt) == CONSTRUCTOR))
static void
lower_vec_perm (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
+ gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
tree mask = gimple_assign_rhs3 (stmt);
tree vec0 = gimple_assign_rhs1 (stmt);
tree vec1 = gimple_assign_rhs2 (stmt);
if (TREE_CODE (mask) == SSA_NAME)
{
- gimple def_stmt = SSA_NAME_DEF_STMT (mask);
+ gimple *def_stmt = SSA_NAME_DEF_STMT (mask);
if (is_gimple_assign (def_stmt)
&& gimple_assign_rhs_code (def_stmt) == VECTOR_CST)
mask = gimple_assign_rhs1 (def_stmt);
}
- if (TREE_CODE (mask) == VECTOR_CST)
- {
- unsigned char *sel_int = XALLOCAVEC (unsigned char, elements);
+ vec_perm_builder sel_int;
- for (i = 0; i < elements; ++i)
- sel_int[i] = (TREE_INT_CST_LOW (VECTOR_CST_ELT (mask, i))
- & (2 * elements - 1));
-
- if (can_vec_perm_p (TYPE_MODE (vect_type), false, sel_int))
+ if (TREE_CODE (mask) == VECTOR_CST
+ && tree_to_vec_perm_builder (&sel_int, mask))
+ {
+ vec_perm_indices indices (sel_int, 2, elements);
+ if (can_vec_perm_const_p (TYPE_MODE (vect_type), indices))
{
gimple_assign_set_rhs3 (stmt, mask);
update_stmt (stmt);
return;
}
+ /* Also detect vec_shr pattern - VEC_PERM_EXPR with zero
+ vector as VEC1 and a right element shift MASK. */
+ if (optab_handler (vec_shr_optab, TYPE_MODE (vect_type))
+ != CODE_FOR_nothing
+ && TREE_CODE (vec1) == VECTOR_CST
+ && initializer_zerop (vec1)
+ && maybe_ne (indices[0], 0)
+ && known_lt (poly_uint64 (indices[0]), elements))
+ {
+ bool ok_p = indices.series_p (0, 1, indices[0], 1);
+ if (!ok_p)
+ {
+ for (i = 1; i < elements; ++i)
+ {
+ poly_uint64 actual = indices[i];
+ poly_uint64 expected = i + indices[0];
+ /* Indices into the second vector are all equivalent. */
+ if (maybe_lt (actual, elements)
+ ? maybe_ne (actual, expected)
+ : maybe_lt (expected, elements))
+ break;
+ }
+ ok_p = i == elements;
+ }
+ if (ok_p)
+ {
+ gimple_assign_set_rhs3 (stmt, mask);
+ update_stmt (stmt);
+ return;
+ }
+ }
}
- else if (can_vec_perm_p (TYPE_MODE (vect_type), true, NULL))
+ else if (can_vec_perm_var_p (TYPE_MODE (vect_type)))
return;
warning_at (loc, OPT_Wvector_operation_performance,
update_stmt (gsi_stmt (*gsi));
}
+/* If OP is a uniform vector return the element it is a splat from. */
+
+static tree
+ssa_uniform_vector_p (tree op)
+{
+ if (TREE_CODE (op) == VECTOR_CST
+ || TREE_CODE (op) == VEC_DUPLICATE_EXPR
+ || TREE_CODE (op) == CONSTRUCTOR)
+ return uniform_vector_p (op);
+ if (TREE_CODE (op) == SSA_NAME)
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (op);
+ if (gimple_assign_single_p (def_stmt))
+ return uniform_vector_p (gimple_assign_rhs1 (def_stmt));
+ }
+ return NULL_TREE;
+}
+
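+/* For example (sketch), both
+
+ v_2 = {x_1, x_1, x_1, x_1};
+ v_3 = VEC_DUPLICATE_EXPR <x_1>;
+
+ make ssa_uniform_vector_p return x_1; an SSA name operand is
+ first chased through its single defining assignment. */
+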
/* Return type in which CODE operation with optab OP can be
computed. */
tree vector_compute_type
= type_for_widest_vector_mode (TREE_TYPE (type), op);
if (vector_compute_type != NULL_TREE
- && (TYPE_VECTOR_SUBPARTS (vector_compute_type)
- < TYPE_VECTOR_SUBPARTS (compute_type))
+ && subparts_gt (compute_type, vector_compute_type)
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vector_compute_type), 1U)
&& (optab_handler (op, TYPE_MODE (vector_compute_type))
!= CODE_FOR_nothing))
compute_type = vector_compute_type;
return compute_type;
}
-/* Helper function of expand_vector_operations_1. Return number of
- vector elements for vector types or 1 for other types. */
+static tree
+do_cond (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
+ tree bitpos, tree bitsize, enum tree_code code,
+ tree type ATTRIBUTE_UNUSED)
+{
+ if (TREE_CODE (TREE_TYPE (a)) == VECTOR_TYPE)
+ a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
+ if (TREE_CODE (TREE_TYPE (b)) == VECTOR_TYPE)
+ b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
+ tree cond = gimple_assign_rhs1 (gsi_stmt (*gsi));
+ return gimplify_build3 (gsi, code, inner_type, unshare_expr (cond), a, b);
+}
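+
+/* Each lane therefore computes (sketch) COND ? a_i : b_i, with the
+ scalar condition COND unshared and re-used for every element. */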
-static inline int
-count_type_subparts (tree type)
+/* Expand a vector COND_EXPR to scalars, piecewise. */
+static void
+expand_vector_scalar_condition (gimple_stmt_iterator *gsi)
{
- return VECTOR_TYPE_P (type) ? TYPE_VECTOR_SUBPARTS (type) : 1;
+ gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
+ tree type = gimple_expr_type (stmt);
+ tree compute_type = get_compute_type (COND_EXPR, mov_optab, type);
+ machine_mode compute_mode = TYPE_MODE (compute_type);
+ gcc_assert (compute_mode != BLKmode);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs2 = gimple_assign_rhs2 (stmt);
+ tree rhs3 = gimple_assign_rhs3 (stmt);
+ tree new_rhs;
+
+ /* If the compute mode is not a vector mode (hence we are not decomposing
+ a BLKmode vector to smaller, hardware-supported vectors), we may want
+ to expand the operations in parallel. */
+ if (GET_MODE_CLASS (compute_mode) != MODE_VECTOR_INT
+ && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FLOAT
+ && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FRACT
+ && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UFRACT
+ && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_ACCUM
+ && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UACCUM)
+ new_rhs = expand_vector_parallel (gsi, do_cond, type, rhs2, rhs3,
+ COND_EXPR);
+ else
+ new_rhs = expand_vector_piecewise (gsi, do_cond, type, compute_type,
+ rhs2, rhs3, COND_EXPR);
+ if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_rhs)))
+ new_rhs = gimplify_build1 (gsi, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
+ new_rhs);
+
+ /* NOTE: We should avoid using gimple_assign_set_rhs_from_tree. One
+ way to do it is change expand_vector_operation and its callees to
+ return a tree_code, RHS1 and RHS2 instead of a tree. */
+ gimple_assign_set_rhs_from_tree (gsi, new_rhs);
+ update_stmt (gsi_stmt (*gsi));
}
/* Process one statement. If we identify a vector operation, expand it. */
static void
expand_vector_operations_1 (gimple_stmt_iterator *gsi)
{
- gimple stmt = gsi_stmt (*gsi);
tree lhs, rhs1, rhs2 = NULL, type, compute_type = NULL_TREE;
enum tree_code code;
optab op = unknown_optab;
enum gimple_rhs_class rhs_class;
tree new_rhs;
- if (gimple_code (stmt) != GIMPLE_ASSIGN)
+ /* Only consider code == GIMPLE_ASSIGN. */
+ gassign *stmt = dyn_cast <gassign *> (gsi_stmt (*gsi));
+ if (!stmt)
return;
code = gimple_assign_rhs_code (stmt);
return;
}
+ if (code == COND_EXPR
+ && TREE_CODE (TREE_TYPE (gimple_assign_lhs (stmt))) == VECTOR_TYPE
+ && TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))) == BLKmode)
+ {
+ expand_vector_scalar_condition (gsi);
+ return;
+ }
+
if (code == CONSTRUCTOR
&& TREE_CODE (lhs) == SSA_NAME
&& VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (lhs)))
if (rhs_class == GIMPLE_BINARY_RHS)
rhs2 = gimple_assign_rhs2 (stmt);
- if (TREE_CODE (type) != VECTOR_TYPE)
+ if (!VECTOR_TYPE_P (type)
+ || !VECTOR_TYPE_P (TREE_TYPE (rhs1)))
+ return;
+
+ /* If all elements of the vector operation's operands are the same,
+ implement it with a scalar operation and a splat if the target
+ supports the scalar operation. */
+ tree srhs1, srhs2 = NULL_TREE;
+ if ((srhs1 = ssa_uniform_vector_p (rhs1)) != NULL_TREE
+ && (rhs2 == NULL_TREE
+ || (! VECTOR_TYPE_P (TREE_TYPE (rhs2))
+ && (srhs2 = rhs2))
+ || (srhs2 = ssa_uniform_vector_p (rhs2)) != NULL_TREE)
+ /* As we query direct optabs restrict to non-convert operations. */
+ && TYPE_MODE (TREE_TYPE (type)) == TYPE_MODE (TREE_TYPE (srhs1)))
+ {
+ op = optab_for_tree_code (code, TREE_TYPE (type), optab_scalar);
+ if (op >= FIRST_NORM_OPTAB && op <= LAST_NORM_OPTAB
+ && optab_handler (op, TYPE_MODE (TREE_TYPE (type))) != CODE_FOR_nothing)
+ {
+ tree slhs = make_ssa_name (TREE_TYPE (srhs1));
+ gimple *repl = gimple_build_assign (slhs, code, srhs1, srhs2);
+ gsi_insert_before (gsi, repl, GSI_SAME_STMT);
+ gimple_assign_set_rhs_from_tree (gsi,
+ build_vector_from_val (type, slhs));
+ update_stmt (stmt);
+ return;
+ }
+ }
+
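+ /* E.g. (illustrative) v_1 = {x_2,x_2,x_2,x_2} + {y_3,y_3,y_3,y_3}
+ becomes
+ s_4 = x_2 + y_3;
+ v_1 = {s_4, s_4, s_4, s_4};
+ when the target has the scalar operation for the element mode. */
+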
+ /* A scalar operation pretending to be a vector one. */
+ if (VECTOR_BOOLEAN_TYPE_P (type)
+ && !VECTOR_MODE_P (TYPE_MODE (type))
+ && TYPE_MODE (type) != BLKmode)
return;
if (CONVERT_EXPR_CODE_P (code)
/* The signedness is determined from input argument. */
if (code == VEC_UNPACK_FLOAT_HI_EXPR
|| code == VEC_UNPACK_FLOAT_LO_EXPR)
- type = TREE_TYPE (rhs1);
+ {
+ type = TREE_TYPE (rhs1);
+ /* We do not know how to scalarize those. */
+ return;
+ }
/* For widening/narrowing vector operations, the relevant type is of the
arguments, not the widened result. VEC_UNPACK_FLOAT_*_EXPR is
|| code == VEC_PACK_FIX_TRUNC_EXPR
|| code == VEC_WIDEN_LSHIFT_HI_EXPR
|| code == VEC_WIDEN_LSHIFT_LO_EXPR)
- type = TREE_TYPE (rhs1);
+ {
+ type = TREE_TYPE (rhs1);
+ /* We do not know how to scalarize those. */
+ return;
+ }
/* Choose between vector shift/rotate by vector and vector shift/rotate by
scalar */
if (VECTOR_INTEGER_TYPE_P (TREE_TYPE (rhs2)))
{
tree first;
- gimple def_stmt;
-
- if ((TREE_CODE (rhs2) == VECTOR_CST
- && (first = uniform_vector_p (rhs2)) != NULL_TREE)
- || (TREE_CODE (rhs2) == SSA_NAME
- && (def_stmt = SSA_NAME_DEF_STMT (rhs2))
- && gimple_assign_single_p (def_stmt)
- && (first = uniform_vector_p
- (gimple_assign_rhs1 (def_stmt))) != NULL_TREE))
+
+ if ((first = ssa_uniform_vector_p (rhs2)) != NULL_TREE)
{
gimple_assign_set_rhs2 (stmt, first);
update_stmt (stmt);
/* The rtl expander will expand vector/scalar as vector/vector
if necessary. Pick one with wider vector type. */
tree compute_vtype = get_compute_type (code, opv, type);
- if (count_type_subparts (compute_vtype)
- > count_type_subparts (compute_type))
+ if (subparts_gt (compute_vtype, compute_type))
{
compute_type = compute_vtype;
op = opv;
tree compute_rtype = get_compute_type (RSHIFT_EXPR, opr, type);
/* The rtl expander will expand vector/scalar as vector/vector
if necessary. Pick one with wider vector type. */
- if (count_type_subparts (compute_lvtype)
- > count_type_subparts (compute_ltype))
+ if (subparts_gt (compute_lvtype, compute_ltype))
{
compute_ltype = compute_lvtype;
opl = oplv;
}
- if (count_type_subparts (compute_rvtype)
- > count_type_subparts (compute_rtype))
+ if (subparts_gt (compute_rvtype, compute_rtype))
{
compute_rtype = compute_rvtype;
opr = oprv;
/* Pick the narrowest type from LSHIFT_EXPR, RSHIFT_EXPR and
BIT_IOR_EXPR. */
compute_type = compute_ltype;
- if (count_type_subparts (compute_type)
- > count_type_subparts (compute_rtype))
+ if (subparts_gt (compute_type, compute_rtype))
compute_type = compute_rtype;
- if (count_type_subparts (compute_type)
- > count_type_subparts (compute_otype))
+ if (subparts_gt (compute_type, compute_otype))
compute_type = compute_otype;
/* Verify all 3 operations can be performed in that type. */
if (compute_type != TREE_TYPE (type))
PROP_gimple_lvec, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_update_ssa
- | TODO_cleanup_cfg ), /* todo_flags_finish */
+ TODO_update_ssa, /* todo_flags_finish */
};
class pass_lower_vector : public gimple_opt_pass