bool reduce_with_shift;
tree vec_temp;
+ gcc_assert (slp_reduc || new_phis.length () == 1);
+
/* See if the target wants to do the final (shift) reduction
in a vector mode of smaller size and first reduce upper/lower
halves against each other. */
tree stype = TREE_TYPE (vectype);
unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
unsigned nunits1 = nunits;
+ if ((mode1 = targetm.vectorize.split_reduction (mode)) != mode
+ && new_phis.length () == 1)
+ {
+ nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
+ /* For SLP reductions we have to make sure lanes match up, but
+ since we're doing individual element final reduction reducing
+ vector width here is even more important.
+ ??? We can also separate lanes with permutes, for the common
+ case of power-of-two group-size odd/even extracts would work. */
+ if (slp_reduc && nunits != nunits1)
+ {
+ nunits1 = least_common_multiple (nunits1, group_size);
+ gcc_assert (exact_log2 (nunits1) != -1 && nunits1 <= nunits);
+ }
+ }
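+ /* For example, for a V8SI reduction on a target whose split_reduction
+ hook returns V4SImode, nunits is 8 and nunits1 becomes 4; with an SLP
+ group_size of 2 the least common multiple keeps nunits1 at 4, while a
+ group_size of 8 forces nunits1 back to 8 and no narrowing happens. */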
- if (!slp_reduc
- && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
- nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
new_temp = new_phi_result;
while (nunits > nunits1)
{
- gcc_assert (!slp_reduc);
nunits /= 2;
vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
stype, nunits);
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
int element_bitsize = tree_to_uhwi (bitsize);
+ tree compute_type = TREE_TYPE (vectype);
+ gimple_seq stmts = NULL;
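+ /* COMPUTE_TYPE is the vector element type; the epilogue arithmetic is
+ carried out in it and only converted to SCALAR_TYPE at the end. All
+ statements are queued in STMTS and inserted at EXIT_GSI in one go
+ below, which lets gimple_build fold them as they are created. */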
FOR_EACH_VEC_ELT (new_phis, i, new_phi)
{
int bit_offset;
if (gimple_code (new_phi) == GIMPLE_PHI)
vec_temp = PHI_RESULT (new_phi);
else
vec_temp = gimple_assign_lhs (new_phi);
- tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
- bitsize_zero_node);
- epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
- new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
- gimple_assign_set_lhs (epilog_stmt, new_temp);
- gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
+ new_temp = gimple_build (&stmts, BIT_FIELD_REF, compute_type,
+ vec_temp, bitsize, bitsize_zero_node);
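+ /* For a V4SI accumulator this builds something like
+ s_1 = BIT_FIELD_REF <vec_temp, 32, 0>, the value of element zero;
+ the loop below extracts the remaining elements and, for non-SLP,
+ accumulates them into NEW_TEMP. */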
/* In SLP we don't need to apply reduction operation, so we just
collect s' values in SCALAR_RESULTS. */
if (slp_reduc)
scalar_results.safe_push (new_temp);
for (bit_offset = element_bitsize;
bit_offset < vec_size_in_bits;
bit_offset += element_bitsize)
{
tree bitpos = bitsize_int (bit_offset);
- tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
- bitsize, bitpos);
-
- epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
- new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
- gimple_assign_set_lhs (epilog_stmt, new_name);
- gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
-
+ new_name = gimple_build (&stmts, BIT_FIELD_REF,
+ compute_type, vec_temp,
+ bitsize, bitpos);
if (slp_reduc)
{
/* In SLP we don't need to apply reduction operation, so
we just collect s' values in SCALAR_RESULTS. */
scalar_results.safe_push (new_name);
}
else
- {
- epilog_stmt = gimple_build_assign (new_scalar_dest, code,
- new_name, new_temp);
- new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
- gimple_assign_set_lhs (epilog_stmt, new_temp);
- gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
- }
+ new_temp = gimple_build (&stmts, code, compute_type,
+ new_name, new_temp);
}
}
if (slp_reduc)
{
tree res, first_res, new_res;
- gimple *new_stmt;
/* Reduce multiple scalar results in case of SLP unrolling. */
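+ /* E.g. with group_size 2 and scalar_results {s0, s1, s2, s3}, the loop
+ below combines them into the first GROUP_SIZE slots: slot 0 becomes
+ s0 OP s2 and slot 1 becomes s1 OP s3. */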
for (j = group_size; scalar_results.iterate (j, &res);
j++)
{
first_res = scalar_results[j % group_size];
- new_stmt = gimple_build_assign (new_scalar_dest, code,
- first_res, res);
- new_res = make_ssa_name (new_scalar_dest, new_stmt);
- gimple_assign_set_lhs (new_stmt, new_res);
- gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
+ new_res = gimple_build (&stmts, code, compute_type,
+ first_res, res);
scalar_results[j % group_size] = new_res;
}
+ for (k = 0; k < group_size; k++)
+ scalar_results[k] = gimple_convert (&stmts, scalar_type,
+ scalar_results[k]);
}
else
- /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
- scalar_results.safe_push (new_temp);
+ {
+ /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
+ new_temp = gimple_convert (&stmts, scalar_type, new_temp);
+ scalar_results.safe_push (new_temp);
+ }
+
+ gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
}
if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)