enum tail_policy get_prefer_tail_policy ();
enum mask_policy get_prefer_mask_policy ();
rtx get_avl_type_rtx (enum avl_type);
+opt_machine_mode get_vector_mode (scalar_mode, poly_uint64);
}
/* We classify builtin types into two classes:
return gen_int_mode (type, Pmode);
}
+/* Return the RVV vector mode that has NUNITS elements of mode INNER_MODE.
+ This function is not only used by builtins, but also will be used by
+ auto-vectorization in the future. */
+opt_machine_mode
+get_vector_mode (scalar_mode inner_mode, poly_uint64 nunits)
+{
+ /* BImode elements select the mask-vector class; otherwise choose the
+ FP or integer vector class from the element mode. */
+ enum mode_class mclass;
+ if (inner_mode == E_BImode)
+ mclass = MODE_VECTOR_BOOL;
+ else if (FLOAT_MODE_P (inner_mode))
+ mclass = MODE_VECTOR_FLOAT;
+ else
+ mclass = MODE_VECTOR_INT;
+ machine_mode mode;
+ /* Scan every mode of the class for one whose element mode and element
+ count match and which is a supported RVV extension mode. */
+ FOR_EACH_MODE_IN_CLASS (mode, mclass)
+ if (inner_mode == GET_MODE_INNER (mode)
+ && known_eq (nunits, GET_MODE_NUNITS (mode))
+ && riscv_v_ext_vector_mode_p (mode))
+ return mode;
+ /* No RVV mode exists for this element mode/NUNITS pair. */
+ return opt_machine_mode ();
+}
+
} // namespace riscv_vector
namespace riscv_vector {
+/* Enumerates types of loads/stores operations.
+ It's only used in here so we don't define it
+ in riscv-vector-builtins-bases.h. */
+enum lst_type
+{
+ /* Unit-stride access (vle/vse/vlm/vsm). */
+ LST_UNIT_STRIDE,
+ /* Strided access (vlse/vsse). */
+ LST_STRIDED,
+ /* Indexed access (vluxei/vloxei/vsuxei/vsoxei). */
+ LST_INDEXED,
+};
+
/* Implements vsetvl<mode> && vsetvlmax<mode>. */
template<bool VLMAX_P>
class vsetvl : public function_base
}
};
-/* Implements vle.v/vse.v/vlm.v/vsm.v/vlse.v/vsse.v codegen. */
-template <bool STORE_P, bool STRIDED_P = false>
+/* Implements
+ * vle.v/vse.v/vlm.v/vsm.v/vlse.v/vsse.v/vluxei.v/vloxei.v/vsuxei.v/vsoxei.v
+ * codegen. */
+template<bool STORE_P, lst_type LST_TYPE, bool ORDERED_P>
class loadstore : public function_base
{
+public:
+ bool apply_tail_policy_p () const override { return !STORE_P; }
+ bool apply_mask_policy_p () const override { return !STORE_P; }
+
unsigned int call_properties (const function_instance &) const override
{
if (STORE_P)
bool can_be_overloaded_p (enum predication_type_index pred) const override
{
- if (STORE_P)
+ if (STORE_P || LST_TYPE == LST_INDEXED)
return true;
return pred != PRED_TYPE_none && pred != PRED_TYPE_mu;
}
rtx expand (function_expander &e) const override
{
- if (STORE_P)
+ if (LST_TYPE == LST_INDEXED)
+ {
+ int unspec = ORDERED_P ? UNSPEC_ORDERED : UNSPEC_UNORDERED;
+ if (STORE_P)
+ return e.use_exact_insn (
+ code_for_pred_indexed_store (unspec, e.vector_mode (),
+ e.index_mode ()));
+ else
+ return e.use_exact_insn (
+ code_for_pred_indexed_load (unspec, e.vector_mode (),
+ e.index_mode ()));
+ }
+ else if (LST_TYPE == LST_STRIDED)
{
- if (STRIDED_P)
+ if (STORE_P)
return e.use_contiguous_store_insn (
code_for_pred_strided_store (e.vector_mode ()));
else
- return e.use_contiguous_store_insn (
- code_for_pred_store (e.vector_mode ()));
+ return e.use_contiguous_load_insn (
+ code_for_pred_strided_load (e.vector_mode ()));
}
else
{
- if (STRIDED_P)
- return e.use_contiguous_load_insn (
- code_for_pred_strided_load (e.vector_mode ()));
+ if (STORE_P)
+ return e.use_contiguous_store_insn (
+ code_for_pred_store (e.vector_mode ()));
else
return e.use_contiguous_load_insn (
code_for_pred_mov (e.vector_mode ()));
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
-static CONSTEXPR const loadstore<false> vle_obj;
-static CONSTEXPR const loadstore<true> vse_obj;
-static CONSTEXPR const loadstore<false> vlm_obj;
-static CONSTEXPR const loadstore<true> vsm_obj;
-static CONSTEXPR const loadstore<false, true> vlse_obj;
-static CONSTEXPR const loadstore<true, true> vsse_obj;
+static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
+static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vse_obj;
+static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vlm_obj;
+static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vsm_obj;
+static CONSTEXPR const loadstore<false, LST_STRIDED, false> vlse_obj;
+static CONSTEXPR const loadstore<true, LST_STRIDED, false> vsse_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei8_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei16_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei32_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei64_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei8_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei16_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei32_obj;
+static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei64_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei8_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei16_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei32_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei64_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei8_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei16_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei32_obj;
+static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei64_obj;
/* Declare the function base NAME, pointing it to an instance
of class <NAME>_obj. */
BASE (vsm)
BASE (vlse)
BASE (vsse)
+BASE (vluxei8)
+BASE (vluxei16)
+BASE (vluxei32)
+BASE (vluxei64)
+BASE (vloxei8)
+BASE (vloxei16)
+BASE (vloxei32)
+BASE (vloxei64)
+BASE (vsuxei8)
+BASE (vsuxei16)
+BASE (vsuxei32)
+BASE (vsuxei64)
+BASE (vsoxei8)
+BASE (vsoxei16)
+BASE (vsoxei32)
+BASE (vsoxei64)
} // end namespace riscv_vector
extern const function_base *const vsm;
extern const function_base *const vlse;
extern const function_base *const vsse;
+extern const function_base *const vluxei8;
+extern const function_base *const vluxei16;
+extern const function_base *const vluxei32;
+extern const function_base *const vluxei64;
+extern const function_base *const vloxei8;
+extern const function_base *const vloxei16;
+extern const function_base *const vloxei32;
+extern const function_base *const vloxei64;
+extern const function_base *const vsuxei8;
+extern const function_base *const vsuxei16;
+extern const function_base *const vsuxei32;
+extern const function_base *const vsuxei64;
+extern const function_base *const vsoxei8;
+extern const function_base *const vsoxei16;
+extern const function_base *const vsoxei32;
+extern const function_base *const vsoxei64;
}
} // end namespace riscv_vector
DEF_RVV_FUNCTION (vsm, loadstore, none_preds, b_v_scalar_ptr_ops)
DEF_RVV_FUNCTION (vlse, loadstore, full_preds, all_v_scalar_const_ptr_ptrdiff_ops)
DEF_RVV_FUNCTION (vsse, loadstore, none_m_preds, all_v_scalar_ptr_ptrdiff_ops)
+DEF_RVV_FUNCTION (vluxei8, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint8_index_ops)
+DEF_RVV_FUNCTION (vluxei16, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint16_index_ops)
+DEF_RVV_FUNCTION (vluxei32, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint32_index_ops)
+DEF_RVV_FUNCTION (vluxei64, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint64_index_ops)
+DEF_RVV_FUNCTION (vloxei8, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint8_index_ops)
+DEF_RVV_FUNCTION (vloxei16, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint16_index_ops)
+DEF_RVV_FUNCTION (vloxei32, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint32_index_ops)
+DEF_RVV_FUNCTION (vloxei64, indexed_loadstore, full_preds, all_v_scalar_const_ptr_uint64_index_ops)
+DEF_RVV_FUNCTION (vsuxei8, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint8_index_ops)
+DEF_RVV_FUNCTION (vsuxei16, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint16_index_ops)
+DEF_RVV_FUNCTION (vsuxei32, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint32_index_ops)
+DEF_RVV_FUNCTION (vsuxei64, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint64_index_ops)
+DEF_RVV_FUNCTION (vsoxei8, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint8_index_ops)
+DEF_RVV_FUNCTION (vsoxei16, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint16_index_ops)
+DEF_RVV_FUNCTION (vsoxei32, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint32_index_ops)
+DEF_RVV_FUNCTION (vsoxei64, indexed_loadstore, none_m_preds, all_v_scalar_ptr_uint64_index_ops)
#undef DEF_RVV_FUNCTION
}
};
+/* indexed_loadstore_def class.  Shape for the indexed load/store
+ intrinsics: like loadstore_def, but only registers a function when an
+ index type exists for the vector type. */
+struct indexed_loadstore_def : public function_shape
+{
+ void build (function_builder &b,
+ const function_group_info &group) const override
+ {
+ for (unsigned int pred_idx = 0; group.preds[pred_idx] != NUM_PRED_TYPES;
+ ++pred_idx)
+ {
+ for (unsigned int vec_type_idx = 0;
+ group.ops_infos.types[vec_type_idx].index != NUM_VECTOR_TYPES;
+ ++vec_type_idx)
+ {
+ /* args[1] is the index operand; get_tree_type returns
+ NULL_TREE when no index vector type can be built for this
+ vector type, in which case the combination is not
+ registered. */
+ tree index_type = group.ops_infos.args[1].get_tree_type (
+ group.ops_infos.types[vec_type_idx].index);
+ if (!index_type)
+ continue;
+ build_one (b, group, pred_idx, vec_type_idx);
+ }
+ }
+ }
+
+ char *get_name (function_builder &b, const function_instance &instance,
+ bool overloaded_p) const override
+ {
+ /* Return nullptr if it can not be overloaded. */
+ if (overloaded_p && !instance.base->can_be_overloaded_p (instance.pred))
+ return nullptr;
+
+ b.append_base_name (instance.base_name);
+ /* vop<sew>_v --> vop<sew>_v_<type>. */
+ if (!overloaded_p)
+ {
+ /* vop<sew> --> vop<sew>_v. */
+ b.append_name (operand_suffixes[instance.op_info->op]);
+ /* vop<sew>_v --> vop<sew>_v_<type>. */
+ b.append_name (type_suffixes[instance.type.index].vector);
+ }
+
+ /* According to rvv-intrinsic-doc, it does not add "_m" suffix
+ for vop_m C++ overloaded API. */
+ if (overloaded_p && instance.pred == PRED_TYPE_m)
+ return b.finish_name ();
+ b.append_name (predication_suffixes[instance.pred]);
+ return b.finish_name ();
+ }
+};
+
SHAPE(vsetvl, vsetvl)
SHAPE(vsetvl, vsetvlmax)
SHAPE(loadstore, loadstore)
+SHAPE(indexed_loadstore, indexed_loadstore)
} // end namespace riscv_vector
extern const function_shape *const vsetvl;
extern const function_shape *const vsetvlmax;
extern const function_shape *const loadstore;
+extern const function_shape *const indexed_loadstore;
}
} // end namespace riscv_vector
rvv_arg_type_info (RVV_BASE_ptrdiff), rvv_arg_type_info (RVV_BASE_vector),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (const scalar_type *, uint8_index_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_const_ptr_uint8_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_const_ptr),
+ rvv_arg_type_info (RVV_BASE_uint8_index), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (const scalar_type *, uint16_index_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_const_ptr_uint16_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_const_ptr),
+ rvv_arg_type_info (RVV_BASE_uint16_index), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (const scalar_type *, uint32_index_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_const_ptr_uint32_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_const_ptr),
+ rvv_arg_type_info (RVV_BASE_uint32_index), rvv_arg_type_info_end};
+
+/* A list of args for vector_type func (const scalar_type *, uint64_index_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_const_ptr_uint64_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_const_ptr),
+ rvv_arg_type_info (RVV_BASE_uint64_index), rvv_arg_type_info_end};
+
+/* A list of args for void func (scalar_type *, uint8_index_type, vector_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_ptr_uint8_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_ptr),
+ rvv_arg_type_info (RVV_BASE_uint8_index),
+ rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+/* A list of args for void func (scalar_type *, uint16_index_type, vector_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_ptr_uint16_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_ptr),
+ rvv_arg_type_info (RVV_BASE_uint16_index),
+ rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+/* A list of args for void func (scalar_type *, uint32_index_type, vector_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_ptr_uint32_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_ptr),
+ rvv_arg_type_info (RVV_BASE_uint32_index),
+ rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
+/* A list of args for void func (scalar_type *, uint64_index_type, vector_type)
+ * function. */
+static CONSTEXPR const rvv_arg_type_info scalar_ptr_uint64_index_args[]
+ = {rvv_arg_type_info (RVV_BASE_scalar_ptr),
+ rvv_arg_type_info (RVV_BASE_uint64_index),
+ rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info_end};
+
/* A list of none preds that will be registered for intrinsic functions. */
static CONSTEXPR const predication_type_index none_preds[]
= {PRED_TYPE_none, NUM_PRED_TYPES};
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
scalar_const_ptr_ptrdiff_args /* Args */};
+/* A static operand information for vector_type func (const scalar_type *,
+ * uint8_index_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_const_ptr_uint8_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_uint8_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * uint16_index_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_const_ptr_uint16_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_uint16_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * uint32_index_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_const_ptr_uint32_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_uint32_index_args /* Args */};
+
+/* A static operand information for vector_type func (const scalar_type *,
+ * uint64_index_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_const_ptr_uint64_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ scalar_const_ptr_uint64_index_args /* Args */};
+
/* A static operand information for void func (scalar_type *, ptrdiff_t,
* vector_type) function registration. */
static CONSTEXPR const rvv_op_info all_v_scalar_ptr_ptrdiff_ops
rvv_arg_type_info (RVV_BASE_void), /* Return type */
scalar_ptr_ptrdiff_args /* Args */};
+/* A static operand information for void func (scalar_type *, uint8_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_ptr_uint8_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_uint8_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, uint16_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_ptr_uint16_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_uint16_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, uint32_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_ptr_uint32_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_uint32_index_args /* Args */};
+
+/* A static operand information for void func (scalar_type *, uint64_index_type,
+ * vector_type) function registration. */
+static CONSTEXPR const rvv_op_info all_v_scalar_ptr_uint64_index_ops
+ = {all_ops, /* Types */
+ OP_TYPE_v, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_void), /* Return type */
+ scalar_ptr_uint64_index_args /* Args */};
+
/* A list of all RVV intrinsic functions. */
static function_group_info function_groups[] = {
#define DEF_RVV_FUNCTION(NAME, SHAPE, PREDS, OPS_INFO) \
builtin_types[type].vector_ptr = build_pointer_type (vectype);
}
+/* Return true if TYPE is a base type whose resolved vector type carries
+ its own required-extension flags: the vector operand itself and the
+ index operand types of indexed loads/stores. */
+static bool
+required_extensions_p (enum rvv_base_type type)
+{
+ return type == RVV_BASE_vector || type == RVV_BASE_uint8_index
+ || type == RVV_BASE_uint16_index || type == RVV_BASE_uint32_index
+ || type == RVV_BASE_uint64_index;
+}
+
/* Check whether all the RVV_REQUIRE_* values in REQUIRED_EXTENSIONS are
enabled. */
static bool
-check_required_extensions (uint64_t required_extensions)
+check_required_extensions (const function_instance &instance)
{
+ rvv_type_info type_info = instance.type;
+ uint64_t required_extensions = type_info.required_extensions;
+ const rvv_op_info *op_info = instance.op_info;
+ tree type = builtin_types[type_info.index].vector;
+ for (unsigned i = 0; op_info->args[i].base_type != NUM_BASE_TYPES; ++i)
+ {
+ if (!required_extensions_p (op_info->args[i].base_type))
+ continue;
+
+ enum vector_type_index vector_type
+ = op_info->args[i].get_base_vector_type (type);
+ if (vector_type == NUM_VECTOR_TYPES)
+ continue;
+ required_extensions |= op_info->types[vector_type].required_extensions;
+
+ /* According to RVV ISA, EEW=64 index of indexed loads/stores require
+ XLEN = 64. */
+ if (op_info->args[i].base_type == RVV_BASE_uint64_index)
+ required_extensions |= RVV_REQUIRE_RV64BIT;
+ }
+
uint64_t riscv_isa_flags = 0;
if (TARGET_VECTOR_ELEN_FP_32)
return gen_int_mode (get_prefer_mask_policy (), Pmode);
}
+/* Return the index of the builtin vector type whose mode has the same
+ number of units as TYPE and whose element mode matches this argument's
+ base type (index element types are always unsigned QI/HI/SI/DI).
+ Return NUM_VECTOR_TYPES when TYPE is null, the base type is not an
+ index type, or no matching registered vector type exists. */
+vector_type_index
+rvv_arg_type_info::get_base_vector_type (tree type) const
+{
+ if (!type)
+ return NUM_VECTOR_TYPES;
+ poly_int64 nunits = GET_MODE_NUNITS (TYPE_MODE (type));
+ machine_mode inner_mode;
+ bool unsigned_p = TYPE_UNSIGNED (type);
+ switch (base_type)
+ {
+ case RVV_BASE_uint8_index:
+ inner_mode = E_QImode;
+ unsigned_p = true;
+ break;
+ case RVV_BASE_uint16_index:
+ inner_mode = E_HImode;
+ unsigned_p = true;
+ break;
+ case RVV_BASE_uint32_index:
+ inner_mode = E_SImode;
+ unsigned_p = true;
+ break;
+ case RVV_BASE_uint64_index:
+ inner_mode = E_DImode;
+ unsigned_p = true;
+ break;
+ default:
+ return NUM_VECTOR_TYPES;
+ }
+
+ opt_machine_mode mode
+ = get_vector_mode (as_a<scalar_mode> (inner_mode), nunits);
+
+ if (!mode.exists ())
+ return NUM_VECTOR_TYPES;
+ /* Find the registered builtin vector type with matching mode and
+ signedness. */
+ for (unsigned int i = 0; i < NUM_VECTOR_TYPES + 1; i++)
+ {
+ tree vector_type = builtin_types[i].vector;
+ if (!vector_type)
+ continue;
+
+ if (TYPE_UNSIGNED (vector_type) != unsigned_p)
+ continue;
+
+ if (TYPE_MODE (vector_type) == mode.require ())
+ return (enum vector_type_index) i;
+ }
+ return NUM_VECTOR_TYPES;
+}
+
tree
rvv_arg_type_info::get_tree_type (vector_type_index type_idx) const
{
return long_unsigned_type_node;
case RVV_BASE_long:
return long_integer_type_node;
+ case RVV_BASE_uint8_index:
+ case RVV_BASE_uint16_index:
+ case RVV_BASE_uint32_index:
+ case RVV_BASE_uint64_index:
+ if (get_base_vector_type (builtin_types[type_idx].vector)
+ != NUM_VECTOR_TYPES)
+ return builtin_types[get_base_vector_type (
+ builtin_types[type_idx].vector)].vector;
+ break;
default:
gcc_unreachable ();
}
+ /* Return NULL_TREE if the type we don't want to register. */
+ return NULL_TREE;
}
function_instance::function_instance (const char *base_name_in,
vec<tree> &argument_types)
{
/* Do not add this function if it is invalid. */
- if (!check_required_extensions (instance.type.required_extensions))
+ if (!check_required_extensions (instance))
return;
/* Add the function under its full (unique) name. */
add_fixed_operand (mem);
}
+/* Implement the call using instruction ICODE, with a 1:1 mapping between
+ arguments and input operands. */
+rtx
+function_expander::use_exact_insn (insn_code icode)
+{
+ machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
+ tree mask_type = builtin_types[mask_types[type.index]].vector;
+ machine_mode mask_mode = TYPE_MODE (mask_type);
+
+ /* Record the offset to get the argument. */
+ int arg_offset = 0;
+
+ /* Mask operand: take it from the call when the predication really
+ carries one, otherwise use an all-ones mask. */
+ if (use_real_mask_p (pred))
+ add_input_operand (arg_offset++);
+ else
+ add_all_one_mask_operand (mask_mode);
+
+ /* Store operation doesn't have merge operand. */
+ if (!function_returns_void_p ())
+ {
+ if (use_real_merge_p (pred))
+ add_input_operand (arg_offset++);
+ else
+ add_vundef_operand (mode);
+ }
+
+ /* Remaining call arguments map 1:1 onto input operands. */
+ for (int argno = arg_offset; argno < call_expr_nargs (exp); argno++)
+ add_input_operand (argno);
+
+ /* Trailing policy and AVL-type operands expected by the pattern. */
+ if (base->apply_tail_policy_p ())
+ add_input_operand (Pmode, get_tail_policy_for_pred (pred));
+ if (base->apply_mask_policy_p ())
+ add_input_operand (Pmode, get_mask_policy_for_pred (pred));
+
+ add_input_operand (Pmode, get_avl_type_rtx (avl_type::NONVLMAX));
+ return generate_insn (icode);
+}
+
/* Use contiguous load INSN. */
rtx
function_expander::use_contiguous_load_insn (insn_code icode)
RVV_BASE_ptrdiff,
RVV_BASE_unsigned_long,
RVV_BASE_long,
+ RVV_BASE_uint8_index,
+ RVV_BASE_uint16_index,
+ RVV_BASE_uint32_index,
+ RVV_BASE_uint64_index,
NUM_BASE_TYPES
};
{}
enum rvv_base_type base_type;
+ vector_type_index get_base_vector_type (tree type) const;
tree get_tree_type (vector_type_index) const;
};
void add_mem_operand (machine_mode, unsigned);
machine_mode vector_mode (void) const;
+ machine_mode index_mode (void) const;
+ rtx use_exact_insn (insn_code);
rtx use_contiguous_load_insn (insn_code);
rtx use_contiguous_store_insn (insn_code);
rtx generate_insn (insn_code);
/* Return true if intrinsics should apply vl operand. */
virtual bool apply_vl_p () const;
+ /* Return true if intrinsics should apply tail policy operand. */
+ virtual bool apply_tail_policy_p () const;
+
+ /* Return true if intrinsics should apply mask policy operand. */
+ virtual bool apply_mask_policy_p () const;
+
/* Return true if intrinsic can be overloaded. */
virtual bool can_be_overloaded_p (enum predication_type_index) const;
return TYPE_MODE (builtin_types[type.index].vector);
}
+/* Return the machine_mode of the corresponding index type.
+ args[1] is the index operand in both the indexed-load (ptr, index)
+ and indexed-store (ptr, index, value) argument lists. */
+inline machine_mode
+function_expander::index_mode (void) const
+{
+ return TYPE_MODE (op_info->args[1].get_tree_type (type.index));
+}
+
/* Default implementation of function_base::call_properties, with conservatively
correct behavior for floating-point instructions. */
inline unsigned int
return true;
}
+/* We choose to apply the tail policy operand by default since most of the
+ intrinsics have a tail policy operand. */
+inline bool
+function_base::apply_tail_policy_p () const
+{
+ return true;
+}
+
+
+/* We choose to apply the mask policy operand by default since most of the
+ intrinsics have a mask policy operand. */
+inline bool
+function_base::apply_mask_policy_p () const
+{
+ return true;
+}
+
/* Since most of intrinsics can be overloaded, we set it true by default. */
inline bool
function_base::can_be_overloaded_p (enum predication_type_index) const
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
+(define_c_enum "unspec" [
+ UNSPEC_VSETVL
+ UNSPEC_VUNDEF
+ UNSPEC_VPREDICATE
+ UNSPEC_VLMAX
+ UNSPEC_STRIDED
+
+ ;; It's used to specify ordered/unordered operation.
+ UNSPEC_ORDERED
+ UNSPEC_UNORDERED
+])
+
(define_mode_iterator V [
VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
(VNx8DF "TARGET_VECTOR_ELEN_FP_64")
])
+;; VNXn_QHSD iterators cover QI/HI/SI/DI element modes (plus SF/DF where
+;; the FP ELEN extensions allow) for a fixed element count n.  The
+;; VNXn_QHSDI/QHSI/QHI variants are the integer-only index-vector modes,
+;; with the DI index additionally gated on TARGET_64BIT.
+(define_mode_iterator VNX1_QHSD [
+ VNx1QI VNx1HI VNx1SI
+ (VNx1DI "TARGET_MIN_VLEN > 32")
+ (VNx1SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx1DF "TARGET_VECTOR_ELEN_FP_64")
+])
+
+(define_mode_iterator VNX2_QHSD [
+ VNx2QI VNx2HI VNx2SI
+ (VNx2DI "TARGET_MIN_VLEN > 32")
+ (VNx2SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx2DF "TARGET_VECTOR_ELEN_FP_64")
+])
+
+(define_mode_iterator VNX4_QHSD [
+ VNx4QI VNx4HI VNx4SI
+ (VNx4DI "TARGET_MIN_VLEN > 32")
+ (VNx4SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx4DF "TARGET_VECTOR_ELEN_FP_64")
+])
+
+(define_mode_iterator VNX8_QHSD [
+ VNx8QI VNx8HI VNx8SI
+ (VNx8DI "TARGET_MIN_VLEN > 32")
+ (VNx8SF "TARGET_VECTOR_ELEN_FP_32")
+ (VNx8DF "TARGET_VECTOR_ELEN_FP_64")
+])
+
+(define_mode_iterator VNX16_QHS [
+ VNx16QI VNx16HI (VNx16SI "TARGET_MIN_VLEN > 32")
+ (VNx16SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX32_QH [
+ VNx32QI (VNx32HI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX64_Q [
+ (VNx64QI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX1_QHSDI [
+ VNx1QI VNx1HI VNx1SI
+ (VNx1DI "TARGET_64BIT && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX2_QHSDI [
+ VNx2QI VNx2HI VNx2SI
+ (VNx2DI "TARGET_64BIT && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX4_QHSDI [
+ VNx4QI VNx4HI VNx4SI
+ (VNx4DI "TARGET_64BIT && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX8_QHSDI [
+ VNx8QI VNx8HI VNx8SI
+ (VNx8DI "TARGET_64BIT && TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX16_QHSI [
+ VNx16QI VNx16HI (VNx16SI "TARGET_MIN_VLEN > 32")
+])
+
+(define_mode_iterator VNX32_QHI [
+ VNx32QI (VNx32HI "TARGET_MIN_VLEN > 32")
+])
+
(define_mode_iterator V_WHOLE [
(VNx4QI "TARGET_MIN_VLEN == 32") VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
(VNx2HI "TARGET_MIN_VLEN == 32") VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
(VNx1SF "32") (VNx2SF "32") (VNx4SF "32") (VNx8SF "32") (VNx16SF "32")
(VNx1DF "64") (VNx2DF "64") (VNx4DF "64") (VNx8DF "64")
])
+
+;; <order> expands to "o"/"u" in the indexed load/store pattern names and
+;; mnemonics (vloxei/vluxei, vsoxei/vsuxei).
+(define_int_iterator ORDER [UNSPEC_ORDERED UNSPEC_UNORDERED])
+
+(define_int_attr order [
+ (UNSPEC_ORDERED "o") (UNSPEC_UNORDERED "u")
+])
(include "vector-iterators.md")
-(define_c_enum "unspec" [
- UNSPEC_VSETVL
- UNSPEC_VUNDEF
- UNSPEC_VPREDICATE
- UNSPEC_VLMAX
- UNSPEC_STRIDED
-])
-
(define_constants [
(INVALID_ATTRIBUTE 255)
])
;; It is valid for instruction that require sew/lmul ratio.
(define_attr "ratio" ""
- (cond [(eq_attr "type" "vimov,vfmov")
+ (cond [(eq_attr "type" "vimov,vfmov,vldux,vldox,vstux,vstox")
(const_int INVALID_ATTRIBUTE)
(eq_attr "mode" "VNx1QI,VNx1BI")
(symbol_ref "riscv_vector::get_ratio(E_VNx1QImode)")
;; The index of operand[] to get the merge op.
(define_attr "merge_op_idx" ""
- (cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu")
+ (cond [(eq_attr "type" "vlde,vimov,vfmov,vldm,vlds,vmalu,vldux,vldox")
(const_int 2)]
(const_int INVALID_ATTRIBUTE)))
;; The index of operand[] to get the avl op.
(define_attr "vl_op_idx" ""
- (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts")
+ (cond [(eq_attr "type" "vlde,vste,vimov,vfmov,vldm,vstm,vmalu,vsts,vstux,vstox")
(const_int 4)
;; If operands[3] of "vlds" is not vector mode, it is pred_broadcast.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(const_int 5)
- (const_int 4))]
+ (const_int 4))
+
+ (eq_attr "type" "vldux,vldox")
+ (const_int 5)]
(const_int INVALID_ATTRIBUTE)))
;; The tail policy op value.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(symbol_ref "riscv_vector::get_ta(operands[6])")
- (symbol_ref "riscv_vector::get_ta(operands[5])"))]
+ (symbol_ref "riscv_vector::get_ta(operands[5])"))
+
+ (eq_attr "type" "vldux,vldox")
+ (symbol_ref "riscv_vector::get_ta(operands[6])")]
(const_int INVALID_ATTRIBUTE)))
;; The mask policy op value.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(symbol_ref "riscv_vector::get_ma(operands[7])")
- (symbol_ref "riscv_vector::get_ma(operands[6])"))]
+ (symbol_ref "riscv_vector::get_ma(operands[6])"))
+
+ (eq_attr "type" "vldux,vldox")
+ (symbol_ref "riscv_vector::get_ma(operands[7])")]
(const_int INVALID_ATTRIBUTE)))
;; The avl type value.
(eq_attr "type" "vlds")
(if_then_else (match_test "VECTOR_MODE_P (GET_MODE (operands[3]))")
(const_int INVALID_ATTRIBUTE)
- (symbol_ref "INTVAL (operands[7])"))]
+ (symbol_ref "INTVAL (operands[7])"))
+
+ (eq_attr "type" "vldux,vldox")
+ (symbol_ref "INTVAL (operands[8])")
+ (eq_attr "type" "vstux,vstox")
+ (symbol_ref "INTVAL (operands[5])")]
(const_int INVALID_ATTRIBUTE)))
;; -----------------------------------------------------------------
"vsse<sew>.v\t%3,%0,%z2%p1"
[(set_attr "type" "vsts")
(set_attr "mode" "<MODE>")])
+
+;; -------------------------------------------------------------------------------
+;; ---- Predicated indexed loads/stores
+;; -------------------------------------------------------------------------------
+;; Includes:
+;; - 7.6. Vector Indexed Instructions
+;; -------------------------------------------------------------------------------
+
+;; Indexed load operand layout: 0 = dest, 1 = mask, 2 = merge,
+;; 3 = base address, 4 = index vector, 5 = VL, 6 = tail policy,
+;; 7 = mask policy, 8 = AVL type (matches the vl_op_idx/ta/ma/avl_type
+;; attributes for vldux/vldox).
+(define_insn "@pred_indexed_<order>load<VNX1_QHSD:mode><VNX1_QHSDI:mode>"
+ [(set (match_operand:VNX1_QHSD 0 "register_operand" "=&vr")
+ (if_then_else:VNX1_QHSD
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX1_QHSD
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX1_QHSDI 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX1_QHSD 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX1_QHSDI:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX1_QHSD:MODE>")])
+
+;; Indexed store operand layout: 0 = mask, 1 = base address,
+;; 2 = index vector, 3 = stored data, 4 = VL, 5 = AVL type (matches the
+;; avl_type attribute for vstux/vstox); stores have no merge/policy
+;; operands.
+(define_insn "@pred_indexed_<order>store<VNX1_QHSD:mode><VNX1_QHSDI:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX1_QHSDI 2 "register_operand" " vr")
+ (match_operand:VNX1_QHSD 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX1_QHSDI:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX1_QHSD:MODE>")])
+
+;; VNx2 variants of the indexed load/store patterns above; operand layout
+;; is identical to the VNx1 patterns.
+(define_insn "@pred_indexed_<order>load<VNX2_QHSD:mode><VNX2_QHSDI:mode>"
+ [(set (match_operand:VNX2_QHSD 0 "register_operand" "=&vr")
+ (if_then_else:VNX2_QHSD
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX2_QHSD
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX2_QHSDI 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX2_QHSD 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX2_QHSDI:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX2_QHSD:MODE>")])
+
+(define_insn "@pred_indexed_<order>store<VNX2_QHSD:mode><VNX2_QHSDI:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX2_QHSDI 2 "register_operand" " vr")
+ (match_operand:VNX2_QHSD 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX2_QHSDI:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX2_QHSD:MODE>")])
+
+;; Indexed load for 4-element modes (vl<order>xei<sew>.v).  Operand 3 is the
+;; base address, operand 4 the index vector, operand 2 the merge input,
+;; operand 1 the mask; operand 5 is the vector length and operands 6-8 are
+;; const_int flags in UNSPEC_VPREDICATE (presumably tail/mask policy and AVL
+;; type -- confirm).  "=&vr" earlyclobbers the destination -- presumably
+;; because the index mode (VNX4_QHSDI) can differ from the data mode, so the
+;; result must not overlap the index register; confirm.
+(define_insn "@pred_indexed_<order>load<VNX4_QHSD:mode><VNX4_QHSDI:mode>"
+ [(set (match_operand:VNX4_QHSD 0 "register_operand" "=&vr")
+ (if_then_else:VNX4_QHSD
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX4_QHSD
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX4_QHSDI 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX4_QHSD 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX4_QHSDI:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX4_QHSD:MODE>")])
+
+;; Indexed store for 4-element modes (vs<order>xei<sew>.v).  Operand 3 is
+;; the data vector, operand 1 the base address, operand 2 the index vector,
+;; operand 0 the mask; operand 4 is the vector length and operand 5 a
+;; const_int in UNSPEC_VPREDICATE (presumably the AVL type -- confirm).  The
+;; store is modeled as an unspec write to (mem:BLK (scratch)).
+(define_insn "@pred_indexed_<order>store<VNX4_QHSD:mode><VNX4_QHSDI:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX4_QHSDI 2 "register_operand" " vr")
+ (match_operand:VNX4_QHSD 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX4_QHSDI:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX4_QHSD:MODE>")])
+
+;; Indexed load for 8-element modes (vl<order>xei<sew>.v).  Same operand
+;; layout as the other indexed-load patterns: operand 3 base address,
+;; operand 4 index vector, operand 2 merge input, operand 1 mask, operand 5
+;; vector length, operands 6-8 const_int policy/AVL flags inside
+;; UNSPEC_VPREDICATE.  Destination is earlyclobbered ("=&vr") so it cannot
+;; share a register with operand 4.
+(define_insn "@pred_indexed_<order>load<VNX8_QHSD:mode><VNX8_QHSDI:mode>"
+ [(set (match_operand:VNX8_QHSD 0 "register_operand" "=&vr")
+ (if_then_else:VNX8_QHSD
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX8_QHSD
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX8_QHSDI 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX8_QHSD 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX8_QHSDI:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX8_QHSD:MODE>")])
+
+;; Indexed store for 8-element modes (vs<order>xei<sew>.v).  Same layout as
+;; the other indexed-store patterns: operand 3 data, operand 1 base address,
+;; operand 2 index vector, operand 0 mask, operand 4 vector length,
+;; operand 5 const_int flag inside UNSPEC_VPREDICATE.  The memory write is
+;; represented by the unspec store to (mem:BLK (scratch)).
+(define_insn "@pred_indexed_<order>store<VNX8_QHSD:mode><VNX8_QHSDI:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX8_QHSDI 2 "register_operand" " vr")
+ (match_operand:VNX8_QHSD 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX8_QHSDI:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX8_QHSD:MODE>")])
+
+;; Indexed load for 16-element modes; only Q/H/S element widths here
+;; (VNX16_QHS), unlike the smaller-count patterns which also cover D.
+;; Operand 3 base address, operand 4 index vector, operand 2 merge input,
+;; operand 1 mask, operand 5 vector length, operands 6-8 const_int
+;; policy/AVL flags inside UNSPEC_VPREDICATE.  "=&vr" keeps the result
+;; disjoint from the index register.
+(define_insn "@pred_indexed_<order>load<VNX16_QHS:mode><VNX16_QHSI:mode>"
+ [(set (match_operand:VNX16_QHS 0 "register_operand" "=&vr")
+ (if_then_else:VNX16_QHS
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX16_QHS
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX16_QHSI 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX16_QHS 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX16_QHSI:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX16_QHS:MODE>")])
+
+;; Indexed store for 16-element modes (Q/H/S element widths only).
+;; Operand 3 data, operand 1 base address, operand 2 index vector,
+;; operand 0 mask, operand 4 vector length, operand 5 const_int flag inside
+;; UNSPEC_VPREDICATE; memory effect modeled via (mem:BLK (scratch)).
+(define_insn "@pred_indexed_<order>store<VNX16_QHS:mode><VNX16_QHSI:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX16_QHSI 2 "register_operand" " vr")
+ (match_operand:VNX16_QHS 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX16_QHSI:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX16_QHS:MODE>")])
+
+;; Indexed load for 32-element modes; only Q/H element widths (VNX32_QH).
+;; Operand 3 base address, operand 4 index vector, operand 2 merge input,
+;; operand 1 mask, operand 5 vector length, operands 6-8 const_int
+;; policy/AVL flags inside UNSPEC_VPREDICATE.  "=&vr" keeps the result
+;; disjoint from the index register.
+(define_insn "@pred_indexed_<order>load<VNX32_QH:mode><VNX32_QHI:mode>"
+ [(set (match_operand:VNX32_QH 0 "register_operand" "=&vr")
+ (if_then_else:VNX32_QH
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX32_QH
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX32_QHI 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX32_QH 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX32_QHI:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX32_QH:MODE>")])
+
+;; Indexed store for 32-element modes (Q/H element widths only).
+;; Operand 3 data, operand 1 base address, operand 2 index vector,
+;; operand 0 mask, operand 4 vector length, operand 5 const_int flag inside
+;; UNSPEC_VPREDICATE; memory effect modeled via (mem:BLK (scratch)).
+(define_insn "@pred_indexed_<order>store<VNX32_QH:mode><VNX32_QHI:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX32_QHI 2 "register_operand" " vr")
+ (match_operand:VNX32_QH 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX32_QHI:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX32_QH:MODE>")])
+
+;; Indexed load for 64-element modes; only Q element width (VNX64_Q), and
+;; here the index vector uses the SAME mode as the data (both VNX64_Q),
+;; unlike the smaller-count patterns which use a separate *I index iterator.
+;; Operand 3 base address, operand 4 index vector, operand 2 merge input,
+;; operand 1 mask, operand 5 vector length, operands 6-8 const_int
+;; policy/AVL flags inside UNSPEC_VPREDICATE.  The destination keeps the
+;; "=&vr" earlyclobber for consistency with the other variants --
+;; NOTE(review): with identical data/index modes it may be stricter than
+;; required; confirm.
+(define_insn "@pred_indexed_<order>load<VNX64_Q:mode><VNX64_Q:mode>"
+ [(set (match_operand:VNX64_Q 0 "register_operand" "=&vr")
+ (if_then_else:VNX64_Q
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1")
+ (match_operand 5 "vector_length_operand" " rK")
+ (match_operand 6 "const_int_operand" " i")
+ (match_operand 7 "const_int_operand" " i")
+ (match_operand 8 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VNX64_Q
+ [(match_operand 3 "pmode_register_operand" " r")
+ (mem:BLK (scratch))
+ (match_operand:VNX64_Q 4 "register_operand" " vr")] ORDER)
+ (match_operand:VNX64_Q 2 "vector_merge_operand" "0vu")))]
+ "TARGET_VECTOR"
+ "vl<order>xei<VNX64_Q:sew>.v\t%0,(%3),%4%p1"
+ [(set_attr "type" "vld<order>x")
+ (set_attr "mode" "<VNX64_Q:MODE>")])
+
+;; Indexed store for 64-element modes (Q element width only); the index
+;; vector (operand 2) and the data vector (operand 3) share mode VNX64_Q,
+;; unlike the smaller-count patterns which use a separate *I index iterator.
+;; Operand 1 is the base address, operand 0 the mask, operand 4 the vector
+;; length, operand 5 a const_int flag inside UNSPEC_VPREDICATE; the memory
+;; effect is modeled via (mem:BLK (scratch)).
+(define_insn "@pred_indexed_<order>store<VNX64_Q:mode><VNX64_Q:mode>"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(unspec:<VM>
+ [(match_operand:<VM> 0 "vector_mask_operand" "vmWc1")
+ (match_operand 4 "vector_length_operand" " rK")
+ (match_operand 5 "const_int_operand" " i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (match_operand 1 "pmode_register_operand" " r")
+ (match_operand:VNX64_Q 2 "register_operand" " vr")
+ (match_operand:VNX64_Q 3 "register_operand" " vr")] ORDER))]
+ "TARGET_VECTOR"
+ "vs<order>xei<VNX64_Q:sew>.v\t%3,(%1),%2%p0"
+ [(set_attr "type" "vst<order>x")
+ (set_attr "mode" "<VNX64_Q:MODE>")])