/* Internal functions.
- Copyright (C) 2011-2018 Free Software Foundation, Inc.
+ Copyright (C) 2011-2019 Free Software Foundation, Inc.
This file is part of GCC.
#define scatter_store_direct { 3, 3, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }
+#define ternary_direct { 0, 0, true }
#define cond_unary_direct { 1, 1, true }
#define cond_binary_direct { 1, 1, true }
+#define cond_ternary_direct { 1, 1, true }
#define while_direct { 0, 2, false }
#define fold_extract_direct { 2, 2, false }
#define fold_left_direct { 1, 1, false }
+#define mask_fold_left_direct { 1, 1, false }
const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
tree type, lhs, rhs;
rtx target, mem;
static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
tree type, lhs, rhs;
rtx target, reg;
target = gen_reg_rtx (Pmode);
rtx size = expand_normal (gimple_call_arg (stmt, 0));
rtx align = expand_normal (gimple_call_arg (stmt, 1));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, Pmode);
create_input_operand (&ops[1], size, Pmode);
create_input_operand (&ops[2], align, Pmode);
{
gcc_checking_assert (!gimple_call_lhs (stmt));
rtx arg = expand_normal (gimple_call_arg (stmt, 0));
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_input_operand (&ops[0], arg, Pmode);
gcc_assert (targetm.have_omp_simt_exit ());
expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx cond = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], cond, mode);
gcc_assert (targetm.have_omp_simt_last_lane ());
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], ctr, mode);
gcc_assert (targetm.have_omp_simt_ordered ());
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx cond = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], cond, mode);
gcc_assert (targetm.have_omp_simt_vote_any ());
rtx src = expand_normal (gimple_call_arg (stmt, 0));
rtx idx = expand_normal (gimple_call_arg (stmt, 1));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], src, mode);
create_input_operand (&ops[2], idx, SImode);
rtx src = expand_normal (gimple_call_arg (stmt, 0));
rtx idx = expand_normal (gimple_call_arg (stmt, 1));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], src, mode);
create_input_operand (&ops[2], idx, SImode);
: usubv4_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
: subv4_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
enum insn_code icode = optab_handler (negv3_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
}
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
/* If both op0 and op1 are sign (!uns) or zero (uns) extended from
hmode to mode, the multiplication will never overflow. We can
do just one hmode x hmode => mode widening multiplication. */
- rtx lopart0s = lopart0, lopart1s = lopart1;
- if (GET_CODE (lopart0) == SUBREG)
- {
- lopart0s = shallow_copy_rtx (lopart0);
- SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
- SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
- }
- if (GET_CODE (lopart1) == SUBREG)
- {
- lopart1s = shallow_copy_rtx (lopart1);
- SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
- SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
- }
tree halfstype = build_nonstandard_integer_type (hprec, uns);
- ops.op0 = make_tree (halfstype, lopart0s);
- ops.op1 = make_tree (halfstype, lopart1s);
+ ops.op0 = make_tree (halfstype, lopart0);
+ ops.op1 = make_tree (halfstype, lopart1);
ops.code = WIDEN_MULT_EXPR;
ops.type = type;
rtx thisres
static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
tree type, lhs, rhs, maskt;
rtx mem, target, mask;
insn_code icode;
static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
tree type, lhs, rhs, maskt;
rtx mem, reg, mask;
insn_code icode;
gcc_unreachable ();
}
+/* IFN_VEC_CONVERT is supposed to be expanded at pass_lower_vector.  So this
+   dummy function should never be called.  */
+
+static void
+expand_VEC_CONVERT (internal_fn, gcall *)
+{
+  /* Reaching expansion means the call survived vector lowering, which is
+     a bug in an earlier pass rather than something expandable here.  */
+  gcc_unreachable ();
+}
+
/* Expand the IFN_UNIQUE function according to its first argument. */
static void
HOST_WIDE_INT scale_int = tree_to_shwi (scale);
rtx rhs_rtx = expand_normal (rhs);
- struct expand_operand ops[6];
+ class expand_operand ops[6];
int i = 0;
create_address_operand (&ops[i++], base_rtx);
create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
HOST_WIDE_INT scale_int = tree_to_shwi (scale);
int i = 0;
- struct expand_operand ops[6];
+ class expand_operand ops[6];
create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
create_address_operand (&ops[i++], base_rtx);
create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
tree_pair types = direct_internal_fn_types (fn, stmt);
insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
+ gcc_assert (icode != CODE_FOR_nothing);
tree lhs = gimple_call_lhs (stmt);
- tree lhs_type = TREE_TYPE (lhs);
- rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
+ rtx lhs_rtx = NULL_RTX;
+ if (lhs)
+ lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
/* Do not assign directly to a promoted subreg, since there is no
guarantee that the instruction will leave the upper bits of the
register in the state required by SUBREG_PROMOTED_SIGN. */
rtx dest = lhs_rtx;
- if (GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
+ if (dest && GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
dest = NULL_RTX;
create_output_operand (&ops[0], dest, insn_data[icode].operand[0].mode);
}
expand_insn (icode, nargs + 1, ops);
- if (!rtx_equal_p (lhs_rtx, ops[0].value))
+ if (lhs_rtx && !rtx_equal_p (lhs_rtx, ops[0].value))
{
/* If the return value has an integral type, convert the instruction
result to that type. This is useful for things that return an
/* If this is a scalar in a register that is stored in a wider
mode than the declared mode, compute the result into its
declared mode and then convert to the wider mode. */
- gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
+ gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
convert_move (SUBREG_REG (lhs_rtx), tmp,
SUBREG_PROMOTED_SIGN (lhs_rtx));
emit_move_insn (lhs_rtx, ops[0].value);
else
{
- gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
+ gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
convert_move (lhs_rtx, ops[0].value, 0);
}
}
#define expand_binary_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 2)
+#define expand_ternary_optab_fn(FN, STMT, OPTAB) \
+ expand_direct_optab_fn (FN, STMT, OPTAB, 3)
+
#define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
- expand_direct_optab_fn (FN, STMT, OPTAB, 2)
+ expand_direct_optab_fn (FN, STMT, OPTAB, 3)
#define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
- expand_direct_optab_fn (FN, STMT, OPTAB, 3)
+ expand_direct_optab_fn (FN, STMT, OPTAB, 4)
+
+#define expand_cond_ternary_optab_fn(FN, STMT, OPTAB) \
+ expand_direct_optab_fn (FN, STMT, OPTAB, 5)
#define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 3)
#define expand_fold_left_optab_fn(FN, STMT, OPTAB) \
expand_direct_optab_fn (FN, STMT, OPTAB, 2)
+#define expand_mask_fold_left_optab_fn(FN, STMT, OPTAB) \
+ expand_direct_optab_fn (FN, STMT, OPTAB, 3)
+
/* RETURN_TYPE and ARGS are a return type and argument list that are
in principle compatible with FN (which satisfies direct_internal_fn_p).
Return the types that should be used to determine whether the
#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
+#define direct_ternary_optab_supported_p direct_optab_supported_p
#define direct_cond_unary_optab_supported_p direct_optab_supported_p
#define direct_cond_binary_optab_supported_p direct_optab_supported_p
+#define direct_cond_ternary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_while_optab_supported_p convert_optab_supported_p
#define direct_fold_extract_optab_supported_p direct_optab_supported_p
#define direct_fold_left_optab_supported_p direct_optab_supported_p
+#define direct_mask_fold_left_optab_supported_p direct_optab_supported_p
/* Return the optab used by internal function FN. */
return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
}
+/* Return true if the STMT is supported when the optimization type is OPT_TYPE,
+   given that STMT is a call to a direct internal function.  */
+
+bool
+direct_internal_fn_supported_p (gcall *stmt, optimization_type opt_type)
+{
+  /* Recover the function and its type signature from the call itself,
+     then defer to the tree_pair overload.  */
+  internal_fn fn = gimple_call_internal_fn (stmt);
+  tree_pair types = direct_internal_fn_types (fn, stmt);
+  return direct_internal_fn_supported_p (fn, types, opt_type);
+}
+
+/* If FN is commutative in two consecutive arguments, return the
+   index of the first, otherwise return -1.  */
+
+int
+first_commutative_argument (internal_fn fn)
+{
+  switch (fn)
+    {
+    /* For these functions the commutative pair is arguments 0 and 1.  */
+    case IFN_FMA:
+    case IFN_FMS:
+    case IFN_FNMA:
+    case IFN_FNMS:
+    case IFN_AVG_FLOOR:
+    case IFN_AVG_CEIL:
+    case IFN_MULHS:
+    case IFN_MULHRS:
+    case IFN_FMIN:
+    case IFN_FMAX:
+      return 0;
+
+    /* Conditional functions take the condition as argument 0, so the
+       commutative pair starts at argument 1.  */
+    case IFN_COND_ADD:
+    case IFN_COND_MUL:
+    case IFN_COND_MIN:
+    case IFN_COND_MAX:
+    case IFN_COND_AND:
+    case IFN_COND_IOR:
+    case IFN_COND_XOR:
+    case IFN_COND_FMA:
+    case IFN_COND_FMS:
+    case IFN_COND_FNMA:
+    case IFN_COND_FNMS:
+      return 1;
+
+    /* No known commutative argument pair.  */
+    default:
+      return -1;
+    }
+}
+
/* Return true if IFN_SET_EDOM is supported. */
bool
0
};
-/* Return a function that performs the conditional form of CODE, i.e.:
+/* Invoke T(CODE, IFN) for each conditional function IFN that maps to a
+ tree code CODE. */
+#define FOR_EACH_CODE_MAPPING(T) \
+ T (PLUS_EXPR, IFN_COND_ADD) \
+ T (MINUS_EXPR, IFN_COND_SUB) \
+ T (MULT_EXPR, IFN_COND_MUL) \
+ T (TRUNC_DIV_EXPR, IFN_COND_DIV) \
+ T (TRUNC_MOD_EXPR, IFN_COND_MOD) \
+ T (RDIV_EXPR, IFN_COND_RDIV) \
+ T (MIN_EXPR, IFN_COND_MIN) \
+ T (MAX_EXPR, IFN_COND_MAX) \
+ T (BIT_AND_EXPR, IFN_COND_AND) \
+ T (BIT_IOR_EXPR, IFN_COND_IOR) \
+ T (BIT_XOR_EXPR, IFN_COND_XOR) \
+ T (LSHIFT_EXPR, IFN_COND_SHL) \
+ T (RSHIFT_EXPR, IFN_COND_SHR)
+
+/* Return a function that only performs CODE when a certain condition is met
+ and that uses a given fallback value otherwise. For example, if CODE is
+ a binary operation associated with conditional function FN:
+
+ LHS = FN (COND, A, B, ELSE)
- LHS = RHS1 ? RHS2 CODE RHS3 : RHS2
+ is equivalent to the C expression:
- (operating elementwise if the operands are vectors). Return IFN_LAST
- if no such function exists. */
+ LHS = COND ? A CODE B : ELSE;
+
+ operating elementwise if the operands are vectors.
+
+ Return IFN_LAST if no such function exists. */
internal_fn
get_conditional_internal_fn (tree_code code)
{
  switch (code)
    {
-    case PLUS_EXPR:
-      return IFN_COND_ADD;
-    case MINUS_EXPR:
-      return IFN_COND_SUB;
-    case MIN_EXPR:
-      return IFN_COND_MIN;
-    case MAX_EXPR:
-      return IFN_COND_MAX;
-    case BIT_AND_EXPR:
-      return IFN_COND_AND;
-    case BIT_IOR_EXPR:
-      return IFN_COND_IOR;
-    case BIT_XOR_EXPR:
-      return IFN_COND_XOR;
+      /* Expand the shared code->IFN table; see FOR_EACH_CODE_MAPPING.  */
+#define CASE(CODE, IFN) case CODE: return IFN;
+      FOR_EACH_CODE_MAPPING (CASE)
+#undef CASE
    default:
      return IFN_LAST;
    }
}
+/* If IFN implements the conditional form of a tree code, return that
+   tree code, otherwise return ERROR_MARK.  */
+
+tree_code
+conditional_internal_fn_code (internal_fn ifn)
+{
+  switch (ifn)
+    {
+      /* The inverse of get_conditional_internal_fn: map IFN back to CODE.  */
+#define CASE(CODE, IFN) case IFN: return CODE;
+      FOR_EACH_CODE_MAPPING (CASE)
+#undef CASE
+    default:
+      return ERROR_MARK;
+    }
+}
+
+/* Invoke T(IFN) for each internal function IFN that also has an
+ IFN_COND_* form. */
+#define FOR_EACH_COND_FN_PAIR(T) \
+ T (FMA) \
+ T (FMS) \
+ T (FNMA) \
+ T (FNMS)
+
+/* Return a function that only performs internal function FN when a
+   certain condition is met and that uses a given fallback value otherwise.
+   In other words, the returned function FN' is such that:
+
+     LHS = FN' (COND, A1, ... An, ELSE)
+
+   is equivalent to the C expression:
+
+     LHS = COND ? FN (A1, ..., An) : ELSE;
+
+   operating elementwise if the operands are vectors.
+
+   Return IFN_LAST if no such function exists.  */
+
+internal_fn
+get_conditional_internal_fn (internal_fn fn)
+{
+  switch (fn)
+    {
+#define CASE(NAME) case IFN_##NAME: return IFN_COND_##NAME;
+      FOR_EACH_COND_FN_PAIR (CASE)
+#undef CASE
+    default:
+      return IFN_LAST;
+    }
+}
+
+/* If IFN implements the conditional form of an unconditional internal
+   function, return that unconditional function, otherwise return IFN_LAST.  */
+
+internal_fn
+get_unconditional_internal_fn (internal_fn ifn)
+{
+  switch (ifn)
+    {
+#define CASE(NAME) case IFN_COND_##NAME: return IFN_##NAME;
+      FOR_EACH_COND_FN_PAIR (CASE)
+#undef CASE
+    default:
+      return IFN_LAST;
+    }
+}
+
+/* Return true if STMT can be interpreted as a conditional tree code
+   operation of the form:
+
+     LHS = COND ? OP (RHS1, ...) : ELSE;
+
+   operating elementwise if the operands are vectors.  This includes
+   the case of an all-true COND, so that the operation always happens.
+
+   When returning true, set:
+
+   - *COND_OUT to the condition COND, or to NULL_TREE if the condition
+     is known to be all-true
+   - *CODE_OUT to the tree code
+   - OPS[I] to operand I of *CODE_OUT
+   - *ELSE_OUT to the fallback value ELSE, or to NULL_TREE if the
+     condition is known to be all true.  */
+
+bool
+can_interpret_as_conditional_op_p (gimple *stmt, tree *cond_out,
+				   tree_code *code_out,
+				   tree (&ops)[3], tree *else_out)
+{
+  /* A plain assignment is an unconditional operation: report it with a
+     null condition and null fallback value.  */
+  if (gassign *assign = dyn_cast <gassign *> (stmt))
+    {
+      *cond_out = NULL_TREE;
+      *code_out = gimple_assign_rhs_code (assign);
+      ops[0] = gimple_assign_rhs1 (assign);
+      ops[1] = gimple_assign_rhs2 (assign);
+      ops[2] = gimple_assign_rhs3 (assign);
+      *else_out = NULL_TREE;
+      return true;
+    }
+  if (gcall *call = dyn_cast <gcall *> (stmt))
+    if (gimple_call_internal_p (call))
+      {
+	internal_fn ifn = gimple_call_internal_fn (call);
+	tree_code code = conditional_internal_fn_code (ifn);
+	if (code != ERROR_MARK)
+	  {
+	    *cond_out = gimple_call_arg (call, 0);
+	    *code_out = code;
+	    /* The call arguments are [COND, OP0, ..., OPn-1, ELSE], so
+	       the number of code operands is the argument count minus the
+	       condition and the fallback value.  */
+	    unsigned int nops = gimple_call_num_args (call) - 2;
+	    for (unsigned int i = 0; i < 3; ++i)
+	      ops[i] = i < nops ? gimple_call_arg (call, i + 1) : NULL_TREE;
+	    *else_out = gimple_call_arg (call, nops + 1);
+	    /* Canonicalize a known-all-true condition to "no condition",
+	       matching the unconditional-assignment case above.  */
+	    if (integer_truep (*cond_out))
+	      {
+		*cond_out = NULL_TREE;
+		*else_out = NULL_TREE;
+	      }
+	    return true;
+	  }
+      }
+  return false;
+}
+
/* Return true if IFN is some form of load from memory. */
bool
return 4;
default:
- return -1;
+ return (conditional_internal_fn_code (fn) != ERROR_MARK
+ || get_unconditional_internal_fn (fn) != IFN_LAST ? 0 : -1);
}
}
expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}
+/* If TYPE is a vector type, return true if IFN is a direct internal
+   function that is supported for that type.  If TYPE is a scalar type,
+   return true if IFN is a direct internal function that is supported for
+   the target's preferred vector version of TYPE.  */
+
+bool
+vectorized_internal_fn_supported_p (internal_fn ifn, tree type)
+{
+  scalar_mode smode;
+  if (!VECTOR_TYPE_P (type) && is_a <scalar_mode> (TYPE_MODE (type), &smode))
+    {
+      /* Promote the scalar type to the vector type the target prefers
+	 for SIMD operations on its mode, if any.  */
+      machine_mode vmode = targetm.vectorize.preferred_simd_mode (smode);
+      if (VECTOR_MODE_P (vmode))
+	type = build_vector_type_for_mode (type, vmode);
+    }
+
+  /* If TYPE still has no vector mode, no vectorized form exists.  */
+  return (VECTOR_MODE_P (TYPE_MODE (type))
+	  && direct_internal_fn_supported_p (ifn, type, OPTIMIZE_FOR_SPEED));
+}
+
void
expand_PHI (internal_fn, gcall *)
{