{"f", &gcc_options::x_target_flags, MASK_HARD_FLOAT},
{"d", &gcc_options::x_target_flags, MASK_DOUBLE_FLOAT},
{"c", &gcc_options::x_target_flags, MASK_RVC},
+ {"v", &gcc_options::x_target_flags, MASK_FULL_V},
{"v", &gcc_options::x_target_flags, MASK_VECTOR},
{"zicsr", &gcc_options::x_riscv_zi_subext, MASK_ZICSR},
}
};
+/* Implements vmulh/vmulhu/vmulhsu.  The UNSPEC template argument
+   (UNSPEC_VMULHS / UNSPEC_VMULHU / UNSPEC_VMULHSU) selects which
+   high-part multiply pattern the builtin call expands to.  */
+template<int UNSPEC>
+class vmulh : public function_base
+{
+public:
+ rtx expand (function_expander &e) const override
+ {
+ switch (e.op_info->op)
+ {
+ case OP_TYPE_vx:
+ /* vector-scalar form -> @pred_mulh<v_su><mode>_scalar.  */
+ return e.use_exact_insn (
+ code_for_pred_mulh_scalar (UNSPEC, e.vector_mode ()));
+ case OP_TYPE_vv:
+ /* vector-vector form -> @pred_mulh<v_su><mode>.  */
+ return e.use_exact_insn (
+ code_for_pred_mulh (UNSPEC, e.vector_mode ()));
+ default:
+ /* Only vv/vx operand types are registered for vmulh*.  */
+ gcc_unreachable ();
+ }
+ }
+};
+
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
static CONSTEXPR const binop<UMIN> vminu_obj;
static CONSTEXPR const binop<UMAX> vmaxu_obj;
static CONSTEXPR const binop<MULT> vmul_obj;
+/* Singleton instances of the vmulh template, one per UNSPEC variant.  */
+static CONSTEXPR const vmulh<UNSPEC_VMULHS> vmulh_obj;
+static CONSTEXPR const vmulh<UNSPEC_VMULHU> vmulhu_obj;
+static CONSTEXPR const vmulh<UNSPEC_VMULHSU> vmulhsu_obj;
static CONSTEXPR const binop<DIV> vdiv_obj;
static CONSTEXPR const binop<MOD> vrem_obj;
static CONSTEXPR const binop<UDIV> vdivu_obj;
BASE (vminu)
BASE (vmaxu)
BASE (vmul)
+BASE (vmulh)
+BASE (vmulhu)
+BASE (vmulhsu)
BASE (vdiv)
BASE (vrem)
BASE (vdivu)
extern const function_base *const vminu;
extern const function_base *const vmaxu;
extern const function_base *const vmul;
+extern const function_base *const vmulh;
+extern const function_base *const vmulhu;
+extern const function_base *const vmulhsu;
extern const function_base *const vdiv;
extern const function_base *const vrem;
extern const function_base *const vdivu;
DEF_RVV_FUNCTION (vminu, alu, full_preds, u_vvv_ops)
DEF_RVV_FUNCTION (vmaxu, alu, full_preds, u_vvv_ops)
DEF_RVV_FUNCTION (vmul, alu, full_preds, iu_vvv_ops)
+DEF_RVV_FUNCTION (vmulh, alu, full_preds, full_v_i_vvv_ops)
+DEF_RVV_FUNCTION (vmulhu, alu, full_preds, full_v_u_vvv_ops)
+DEF_RVV_FUNCTION (vmulhsu, alu, full_preds, full_v_i_su_vvv_ops)
DEF_RVV_FUNCTION (vdiv, alu, full_preds, i_vvv_ops)
DEF_RVV_FUNCTION (vrem, alu, full_preds, i_vvv_ops)
DEF_RVV_FUNCTION (vdivu, alu, full_preds, u_vvv_ops)
DEF_RVV_FUNCTION (vminu, alu, full_preds, u_vvx_ops)
DEF_RVV_FUNCTION (vmaxu, alu, full_preds, u_vvx_ops)
DEF_RVV_FUNCTION (vmul, alu, full_preds, iu_vvx_ops)
+DEF_RVV_FUNCTION (vmulh, alu, full_preds, full_v_i_vvx_ops)
+DEF_RVV_FUNCTION (vmulhu, alu, full_preds, full_v_u_vvx_ops)
+DEF_RVV_FUNCTION (vmulhsu, alu, full_preds, full_v_i_su_vvx_ops)
DEF_RVV_FUNCTION (vdiv, alu, full_preds, i_vvx_ops)
DEF_RVV_FUNCTION (vrem, alu, full_preds, i_vvx_ops)
DEF_RVV_FUNCTION (vdivu, alu, full_preds, u_vvx_ops)
#define DEF_RVV_OEXTU_OPS(TYPE, REQUIRE)
#endif
+/* Use the "DEF_RVV_FULL_V_I_OPS" macro to include all signed integer types
+   that require the full 'V' extension; these will be iterated over and
+   registered as intrinsic functions.  */
+#ifndef DEF_RVV_FULL_V_I_OPS
+#define DEF_RVV_FULL_V_I_OPS(TYPE, REQUIRE)
+#endif
+
+/* Use the "DEF_RVV_FULL_V_U_OPS" macro to include all unsigned integer types
+   that require the full 'V' extension; these will be iterated over and
+   registered as intrinsic functions.  */
+#ifndef DEF_RVV_FULL_V_U_OPS
+#define DEF_RVV_FULL_V_U_OPS(TYPE, REQUIRE)
+#endif
+
DEF_RVV_I_OPS (vint8mf8_t, RVV_REQUIRE_ZVE64)
DEF_RVV_I_OPS (vint8mf4_t, 0)
DEF_RVV_I_OPS (vint8mf2_t, 0)
DEF_RVV_OEXTU_OPS (vuint64m4_t, RVV_REQUIRE_ZVE64)
DEF_RVV_OEXTU_OPS (vuint64m8_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_I_OPS (vint8mf8_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_I_OPS (vint8mf4_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint8mf2_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint8m1_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint8m2_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint8m4_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint8m8_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint16mf4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_I_OPS (vint16mf2_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint16m1_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint16m2_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint16m4_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint16m8_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint32mf2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_I_OPS (vint32m1_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint32m2_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint32m4_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint32m8_t, 0)
+DEF_RVV_FULL_V_I_OPS (vint64m1_t, RVV_REQUIRE_FULL_V)
+DEF_RVV_FULL_V_I_OPS (vint64m2_t, RVV_REQUIRE_FULL_V)
+DEF_RVV_FULL_V_I_OPS (vint64m4_t, RVV_REQUIRE_FULL_V)
+DEF_RVV_FULL_V_I_OPS (vint64m8_t, RVV_REQUIRE_FULL_V)
+
+DEF_RVV_FULL_V_U_OPS (vuint8mf8_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_U_OPS (vuint8mf4_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint8mf2_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint8m1_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint8m2_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint8m4_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint8m8_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint16mf4_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_U_OPS (vuint16mf2_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint16m1_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint16m2_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint16m4_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint16m8_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint32mf2_t, RVV_REQUIRE_ZVE64)
+DEF_RVV_FULL_V_U_OPS (vuint32m1_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint32m2_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint32m4_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint32m8_t, 0)
+DEF_RVV_FULL_V_U_OPS (vuint64m1_t, RVV_REQUIRE_FULL_V)
+DEF_RVV_FULL_V_U_OPS (vuint64m2_t, RVV_REQUIRE_FULL_V)
+DEF_RVV_FULL_V_U_OPS (vuint64m4_t, RVV_REQUIRE_FULL_V)
+DEF_RVV_FULL_V_U_OPS (vuint64m8_t, RVV_REQUIRE_FULL_V)
+
#undef DEF_RVV_I_OPS
#undef DEF_RVV_U_OPS
#undef DEF_RVV_F_OPS
#undef DEF_RVV_WEXTU_OPS
#undef DEF_RVV_QEXTU_OPS
#undef DEF_RVV_OEXTU_OPS
+#undef DEF_RVV_FULL_V_I_OPS
+#undef DEF_RVV_FULL_V_U_OPS
#include "riscv-vector-builtins-types.def"
{NUM_VECTOR_TYPES, 0}};
+/* A list of all signed integer types (those with SEW = 64 require the full
+   'V' extension) that will be registered for intrinsic functions.  */
+static const rvv_type_info full_v_i_ops[] = {
+#define DEF_RVV_FULL_V_I_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
+/* A list of all unsigned integer types (those with SEW = 64 require the full
+   'V' extension) that will be registered for intrinsic functions.  */
+static const rvv_type_info full_v_u_ops[] = {
+#define DEF_RVV_FULL_V_U_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
+#include "riscv-vector-builtins-types.def"
+ {NUM_VECTOR_TYPES, 0}};
+
/* A list of all signed integer will be registered for intrinsic functions. */
static const rvv_type_info u_ops[] = {
#define DEF_RVV_U_OPS(TYPE, REQUIRE) {VECTOR_TYPE_##TYPE, REQUIRE},
= {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_vector),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (signed vector_type, unsigned
+ * vector_type) function. */
+static CONSTEXPR const rvv_arg_type_info su_vv_args[]
+ = {rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_unsigned_vector), rvv_arg_type_info_end};
+
/* A list of args for vector_type func (vector_type, scalar_type) function. */
static CONSTEXPR const rvv_arg_type_info vx_args[]
= {rvv_arg_type_info (RVV_BASE_vector), rvv_arg_type_info (RVV_BASE_scalar),
rvv_arg_type_info_end};
+/* A list of args for vector_type func (signed vector_type, unsigned
+ * scalar_type) function. */
+static CONSTEXPR const rvv_arg_type_info su_vx_args[]
+ = {rvv_arg_type_info (RVV_BASE_vector),
+ rvv_arg_type_info (RVV_BASE_unsigned_scalar), rvv_arg_type_info_end};
+
/* A list of args for vector_type func (vector_type, shift_type) function. */
static CONSTEXPR const rvv_arg_type_info shift_vv_args[]
= {rvv_arg_type_info (RVV_BASE_vector),
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vv_args /* Args */};
+/* A static operand information for vector_type func (vector_type, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info full_v_i_vvv_ops
+ = {full_v_i_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vv_args /* Args */};
+
+/* A static operand information for vector_type func (vector_type, vector_type)
+ * function registration. */
+static CONSTEXPR const rvv_op_info full_v_u_vvv_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vv_args /* Args */};
+
+/* A static operand information for vector_type func (signed vector_type,
+ * unsigned vector_type) function registration. */
+static CONSTEXPR const rvv_op_info full_v_i_su_vvv_ops
+ = {full_v_i_ops, /* Types */
+ OP_TYPE_vv, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ su_vv_args /* Args */};
+
/* A static operand information for vector_type func (vector_type, scalar_type)
* function registration. */
static CONSTEXPR const rvv_op_info iu_vvx_ops
rvv_arg_type_info (RVV_BASE_vector), /* Return type */
vx_args /* Args */};
+/* A static operand information for vector_type func (vector_type, scalar_type)
+ * function registration that requires the full 'V' extension. */
+static CONSTEXPR const rvv_op_info full_v_i_vvx_ops
+ = {full_v_i_ops, /* Types */
+ OP_TYPE_vx, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vx_args /* Args */};
+
+/* A static operand information for vector_type func (vector_type, scalar_type)
+ * function registration that requires the full 'V' extension. */
+static CONSTEXPR const rvv_op_info full_v_u_vvx_ops
+ = {full_v_u_ops, /* Types */
+ OP_TYPE_vx, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ vx_args /* Args */};
+
+/* A static operand information for vector_type func (signed vector_type,
+ * unsigned scalar_type) function registration that requires the full 'V'
+ * extension. */
+static CONSTEXPR const rvv_op_info full_v_i_su_vvx_ops
+ = {full_v_i_ops, /* Types */
+ OP_TYPE_vx, /* Suffix */
+ rvv_arg_type_info (RVV_BASE_vector), /* Return type */
+ su_vx_args /* Args */};
+
/* A static operand information for vector_type func (vector_type, shift_type)
* function registration. */
static CONSTEXPR const rvv_op_info iu_shift_vvv_ops
riscv_isa_flags |= RVV_REQUIRE_ZVE64;
if (TARGET_64BIT)
riscv_isa_flags |= RVV_REQUIRE_RV64BIT;
+ if (TARGET_FULL_V)
+ riscv_isa_flags |= RVV_REQUIRE_FULL_V;
uint64_t missing_extensions = required_extensions & ~riscv_isa_flags;
if (missing_extensions != 0)
tree
rvv_arg_type_info::get_tree_type (vector_type_index type_idx) const
{
+ /* If the builtin type is not registered, it means '-march' doesn't
+ satisfy the required extension of the type. For example,
+ vfloat32m1_t requires the floating-point extension. In this case,
+ just return NULL_TREE. */
+ if (!builtin_types[type_idx].vector)
+ return NULL_TREE;
switch (base_type)
{
case RVV_BASE_vector:
return builtin_types[type_idx].vector;
case RVV_BASE_scalar:
return builtin_types[type_idx].scalar;
+ /* According to riscv-vector-builtins-types.def, the unsigned
+ type is always the signed type + 1 (they have the same SEW and LMUL).
+ For example 'vuint8mf8_t' enum = 'vint8mf8_t' enum + 1.
+ Note: We don't allow type_idx to be an unsigned type. */
+ case RVV_BASE_unsigned_vector:
+ gcc_assert (!TYPE_UNSIGNED (builtin_types[type_idx].vector));
+ return builtin_types[type_idx + 1].vector;
+ case RVV_BASE_unsigned_scalar:
+ gcc_assert (!TYPE_UNSIGNED (builtin_types[type_idx].scalar));
+ return builtin_types[type_idx + 1].scalar;
case RVV_BASE_vector_ptr:
return builtin_types[type_idx].vector_ptr;
case RVV_BASE_scalar_ptr:
#define RVV_REQUIRE_ZVE64 (1 << 1) /* Require TARGET_MIN_VLEN > 32. */
#define RVV_REQUIRE_ELEN_FP_32 (1 << 2) /* Require FP ELEN >= 32. */
#define RVV_REQUIRE_ELEN_FP_64 (1 << 3) /* Require FP ELEN >= 64. */
+#define RVV_REQUIRE_FULL_V (1 << 4) /* Require Full 'V' extension. */
/* Enumerates the RVV operand types. */
enum operand_type_index
{
RVV_BASE_vector,
RVV_BASE_scalar,
+ RVV_BASE_unsigned_vector,
+ RVV_BASE_unsigned_scalar,
RVV_BASE_vector_ptr,
RVV_BASE_scalar_ptr,
RVV_BASE_scalar_const_ptr,
Mask(VECTOR)
+Mask(FULL_V)
+
mriscv-attribute
Target Var(riscv_emit_attribute_p) Init(-1)
Emit RISC-V ELF attribute.
;; It's used to specify ordered/unordered operation.
UNSPEC_ORDERED
UNSPEC_UNORDERED
+
+ ;; vmulh/vmulhu/vmulhsu
+ UNSPEC_VMULHS
+ UNSPEC_VMULHU
+ UNSPEC_VMULHSU
])
(define_mode_iterator V [
(VNx4DI "TARGET_MIN_VLEN > 32") (VNx8DI "TARGET_MIN_VLEN > 32")
])
+;; Integer vector modes for instructions that need the full 'V' extension
+;; for 64-bit elements: all QI/HI/SI element modes are always present,
+;; while the DI element modes are gated on TARGET_FULL_V.
+(define_mode_iterator VFULLI [
+ VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
+ VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
+ VNx1SI VNx2SI VNx4SI VNx8SI (VNx16SI "TARGET_MIN_VLEN > 32")
+ (VNx1DI "TARGET_FULL_V") (VNx2DI "TARGET_FULL_V")
+ (VNx4DI "TARGET_FULL_V") (VNx8DI "TARGET_FULL_V")
+])
+
(define_mode_iterator VI_QHS [
VNx1QI VNx2QI VNx4QI VNx8QI VNx16QI VNx32QI (VNx64QI "TARGET_MIN_VLEN > 32")
VNx1HI VNx2HI VNx4HI VNx8HI VNx16HI (VNx32HI "TARGET_MIN_VLEN > 32")
(VNx4DI "TARGET_MIN_VLEN > 32") (VNx8DI "TARGET_MIN_VLEN > 32")
])
+;; The DImode-element subset of VFULLI; every mode requires TARGET_FULL_V.
+(define_mode_iterator VFULLI_D [
+ (VNx1DI "TARGET_FULL_V") (VNx2DI "TARGET_FULL_V")
+ (VNx4DI "TARGET_FULL_V") (VNx8DI "TARGET_FULL_V")
+])
+
(define_mode_iterator VNX1_QHSD [
VNx1QI VNx1HI VNx1SI
(VNx1DI "TARGET_MIN_VLEN > 32")
(define_int_iterator ORDER [UNSPEC_ORDERED UNSPEC_UNORDERED])
+(define_int_iterator VMULH [UNSPEC_VMULHS UNSPEC_VMULHU UNSPEC_VMULHSU])
+
(define_int_attr order [
(UNSPEC_ORDERED "o") (UNSPEC_UNORDERED "u")
])
+(define_int_attr v_su [(UNSPEC_VMULHS "") (UNSPEC_VMULHU "u") (UNSPEC_VMULHSU "su")])
+
(define_code_iterator any_int_binop [plus minus and ior xor ashift ashiftrt lshiftrt
smax umax smin umin mult div udiv mod umod
])
;; - 11.5 Vector Bitwise Logical Instructions
;; - 11.6 Vector Single-Width Bit Shift Instructions
;; - 11.9 Vector Integer Min/Max Instructions
+;; - 11.10 Vector Single-Width Integer Multiply Instructions
;; - 11.11 Vector Integer Divide Instructions
;; - 12.1 Vector Single-Width Saturating Add and Subtract
;; -------------------------------------------------------------------------------
[(set_attr "type" "<int_binop_insn_type>")
(set_attr "mode" "<MODE>")])
+;; Multiply High instructions.
+;; Predicated vmulh<v_su>.vv: the high part of the product of vector
+;; operands 3 and 4 is written to operand 0 under mask operand 1, merged
+;; with operand 2.  Operand 5 is the vector length; operands 6-8 are
+;; const_int policy operands (NOTE(review): their exact semantics are
+;; defined by UNSPEC_VPREDICATE users elsewhere — confirm there).
+;; DImode elements require TARGET_FULL_V via the VFULLI iterator.
+(define_insn "@pred_mulh<v_su><mode>"
+ [(set (match_operand:VFULLI 0 "register_operand" "=vd, vr")
+ (if_then_else:VFULLI
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI
+ [(match_operand:VFULLI 3 "register_operand" " vr, vr")
+ (match_operand:VFULLI 4 "register_operand" " vr, vr")] VMULH)
+ (match_operand:VFULLI 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "vmulh<v_su>.vv\t%0,%3,%4%p1"
+ [(set_attr "type" "vimul")
+ (set_attr "mode" "<MODE>")])
+
+;; .vx form for QI/HI/SI element modes: scalar operand 4 always fits in a
+;; GPR, so a plain define_insn suffices (DImode elements go through the
+;; VFULLI_D define_expand instead).
+(define_insn "@pred_mulh<v_su><mode>_scalar"
+ [(set (match_operand:VI_QHS 0 "register_operand" "=vd, vr")
+ (if_then_else:VI_QHS
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VI_QHS
+ [(vec_duplicate:VI_QHS
+ (match_operand:<VEL> 4 "register_operand" " r, r"))
+ (match_operand:VI_QHS 3 "register_operand" " vr, vr")] VMULH)
+ (match_operand:VI_QHS 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "vmulh<v_su>.vx\t%0,%3,%4%p1"
+ [(set_attr "type" "vimul")
+ (set_attr "mode" "<MODE>")])
+
+;; .vx expander for DImode elements.  On RV32 a 64-bit scalar does not fit
+;; in one GPR: a 32-bit-representable value is sign-extended into the
+;; pattern (matched by *pred_mulh<v_su><mode>_extended_scalar below);
+;; anything else is broadcast into a vector register and the .vv
+;; instruction is emitted instead.
+(define_expand "@pred_mulh<v_su><mode>_scalar"
+ [(set (match_operand:VFULLI_D 0 "register_operand")
+ (if_then_else:VFULLI_D
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand")
+ (match_operand 5 "vector_length_operand")
+ (match_operand 6 "const_int_operand")
+ (match_operand 7 "const_int_operand")
+ (match_operand 8 "const_int_operand")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI_D
+ [(vec_duplicate:VFULLI_D
+ (match_operand:<VEL> 4 "reg_or_int_operand"))
+ (match_operand:VFULLI_D 3 "register_operand")] VMULH)
+ (match_operand:VFULLI_D 2 "vector_merge_operand")))]
+ "TARGET_VECTOR"
+ {
+ if (!TARGET_64BIT)
+ {
+ rtx v = gen_reg_rtx (<MODE>mode);
+
+ /* Values representable in 32 bits can use the sign-extended
+ .vx pattern directly.  */
+ if (riscv_vector::simm32_p (operands[4]))
+ operands[4] = gen_rtx_SIGN_EXTEND (<VEL>mode,
+ force_reg (Pmode, operands[4]));
+ else
+ {
+ if (CONST_INT_P (operands[4]))
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+
+ /* Broadcast the 64-bit scalar into v, then use the .vv form.  */
+ riscv_vector::emit_nonvlmax_op (code_for_pred_broadcast (<MODE>mode),
+ v, operands[4], operands[5], <VM>mode);
+ emit_insn (gen_pred_mulh<v_su><mode> (operands[0], operands[1],
+ operands[2], operands[3], v, operands[5],
+ operands[6], operands[7], operands[8]));
+ DONE;
+ }
+ }
+ else
+ /* RV64: the 64-bit scalar fits in a single GPR.  */
+ operands[4] = force_reg (<VEL>mode, operands[4]);
+ })
+
+;; .vx define_insn for DImode elements with the scalar already in a 64-bit
+;; GPR (the RV64 path of the expander above).
+(define_insn "*pred_mulh<v_su><mode>_scalar"
+ [(set (match_operand:VFULLI_D 0 "register_operand" "=vd, vr")
+ (if_then_else:VFULLI_D
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI_D
+ [(vec_duplicate:VFULLI_D
+ (match_operand:<VEL> 4 "register_operand" " r, r"))
+ (match_operand:VFULLI_D 3 "register_operand" " vr, vr")] VMULH)
+ (match_operand:VFULLI_D 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "vmulh<v_su>.vx\t%0,%3,%4%p1"
+ [(set_attr "type" "vimul")
+ (set_attr "mode" "<MODE>")])
+
+;; RV32 path: the duplicated operand is a sign-extended 32-bit GPR
+;; (<VSUBEL>), as produced by the expander for simm32 values.
+(define_insn "*pred_mulh<v_su><mode>_extended_scalar"
+ [(set (match_operand:VFULLI_D 0 "register_operand" "=vd, vr")
+ (if_then_else:VFULLI_D
+ (unspec:<VM>
+ [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1")
+ (match_operand 5 "vector_length_operand" " rK, rK")
+ (match_operand 6 "const_int_operand" " i, i")
+ (match_operand 7 "const_int_operand" " i, i")
+ (match_operand 8 "const_int_operand" " i, i")
+ (reg:SI VL_REGNUM)
+ (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+ (unspec:VFULLI_D
+ [(vec_duplicate:VFULLI_D
+ (sign_extend:<VEL>
+ (match_operand:<VSUBEL> 4 "register_operand" " r, r")))
+ (match_operand:VFULLI_D 3 "register_operand" " vr, vr")] VMULH)
+ (match_operand:VFULLI_D 2 "vector_merge_operand" "0vu,0vu")))]
+ "TARGET_VECTOR"
+ "vmulh<v_su>.vx\t%0,%3,%4%p1"
+ [(set_attr "type" "vimul")
+ (set_attr "mode" "<MODE>")])
+
;; -------------------------------------------------------------------------------
;; ---- Predicated integer unary operations
;; -------------------------------------------------------------------------------