};
bool slide1_sew64_helper (int, machine_mode, machine_mode,
machine_mode, rtx *);
+rtx gen_avl_for_scalar_move (rtx);
}
/* We classify builtin types into two classes:
return true;
}
+/* Return an rtx to use as the AVL operand of a scalar-move
+   (vmv.s.x / vfmv.s.f) pattern.
+
+   A scalar move only distinguishes AVL == 0 (tail only, no element
+   written) from AVL > 0 (element 0 written), so any nonzero AVL can
+   be canonicalized to 1.  A constant 1 lets later vsetvl insertion
+   use the immediate form (vsetivli) instead of keeping the original,
+   possibly large, AVL live in a register.  */
+rtx
+gen_avl_for_scalar_move (rtx avl)
+{
+  /* AVL for scalar move has different behavior between 0 and larger than 0.  */
+  if (CONST_INT_P (avl))
+    {
+      /* So we could just set AVL to 1 for any constant other than 0.  */
+      if (rtx_equal_p (avl, const0_rtx))
+	return const0_rtx;
+      else
+	return const1_rtx;
+    }
+  else
+    {
+      /* For non-constant value, we set any non zero value to 1 by
+	 `sgtu new_avl,input_avl,zero` + `vsetvli`.
+	 GTU against zero is exactly "input != 0 ? 1 : 0", computed in
+	 a fresh pseudo so the original AVL register is left intact.  */
+      rtx tmp = gen_reg_rtx (Pmode);
+      emit_insn (
+	gen_rtx_SET (tmp, gen_rtx_fmt_ee (GTU, Pmode, avl, const0_rtx)));
+      return tmp;
+    }
+}
+
} // namespace riscv_vector
else if (GET_MODE_BITSIZE (<VEL>mode) > GET_MODE_BITSIZE (Pmode))
{
// Case 2: vmv.s.x (TU) ==> andi vl + vlse.v (TU) in RV32 system.
- rtx tmp = gen_reg_rtx (Pmode);
- emit_insn (gen_rtx_SET (tmp, gen_rtx_AND (Pmode, operands[4], const1_rtx)));
- operands[4] = tmp;
+ operands[4] = riscv_vector::gen_avl_for_scalar_move (operands[4]);
operands[1] = CONSTM1_RTX (<VM>mode);
}
else
vlse64.v */
if (satisfies_constraint_Wb1 (operands[1]))
{
- rtx tmp = gen_reg_rtx (Pmode);
- emit_insn (gen_rtx_SET (tmp, gen_rtx_AND (Pmode, operands[4], const1_rtx)));
- operands[4] = tmp;
+ operands[4] = riscv_vector::gen_avl_for_scalar_move (operands[4]);
operands[1] = CONSTM1_RTX (<VM>mode);
}
}
/*
** foo3:
** ...
-** andi\t[a-x0-9]+,\s*[a-x0-9]+,\s*1
-** ...
** vlse64.v\tv[0-9]+,0\([a-x0-9]+\),zero
** ...
** ret
/*
** foo4:
** ...
-** andi\t[a-x0-9]+,\s*[a-x0-9]+,\s*1
-** ...
** vlse64.v\tv[0-9]+,0\([a-x0-9]+\),zero
** ...
** ret
/*
** foo10:
** ...
-** andi\t[a-x0-9]+,\s*[a-x0-9]+,\s*1
-** ...
** vmv.v.i\tv[0-9]+,\s*-15
** ...
*/
/*
** foo12:
** ...
-** andi\t[a-x0-9]+,\s*[a-x0-9]+,\s*1
-** ...
** vmv.v.i\tv[0-9]+,\s*0
** ...
** ret
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -fno-schedule-insns -fno-schedule-insns2 -O3" } */
+
+#include "riscv_vector.h"
+
+/* Constant AVL == 0: must stay 0 (no element written), so the
+   vsetivli below must request vl = 0.  */
+vuint64m2_t f1(vuint64m2_t var_17, uint64_t var_60)
+{
+  vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_17,var_60, 0);
+  return var_16;
+}
+
+/* Constant AVL == 4: scalar move only touches element 0, so the AVL
+   should be canonicalized to 1 (vsetivli with vl = 1).  */
+vuint64m2_t f2(vuint64m2_t var_17, uint64_t var_60)
+{
+  vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_17,var_60, 4);
+  return var_16;
+}
+
+/* Runtime AVL: expect a single `sgtu` to collapse any nonzero vl
+   to 1 before the vsetvli.  */
+vuint64m2_t f3(vuint64m2_t var_17, uint64_t var_60, size_t vl)
+{
+  vuint64m2_t var_16 = __riscv_vmv_s_x_u64m2_tu(var_17,var_60, vl);
+  return var_16;
+}
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*0,\s*e64,\s*m2,\s*t[au],\s*m[au]} 1 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*1,\s*e64,\s*m2,\s*t[au],\s*m[au]} 1 } } */
+/* { dg-final { scan-assembler-times {sgtu} 1 } } */