vslideup works by leaving elements 0 < i < OFFSET of the destination undisturbed,
so it needs the destination operand as an input for correctness
regardless of policy. Add a policy operand to indicate the tail policy.
We also add a policy operand to the unmasked vslidedown to keep the interface consistent with vslideup,
because vslidedown only leaves elements 0 < i < vstart undisturbed, and the user has no way to control vstart.
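
For illustration, a minimal sketch of the C-level usage these tests cover,
assuming the pre-__riscv_-prefix intrinsic naming used by this test file; the
point is only that the unmasked form still takes dst as an input, and after
this change its lowered llvm.riscv.vslideup call carries a trailing i64 policy
operand (0 here, i.e. tail undisturbed), as the updated CHECK lines below show:

  #include <riscv_vector.h>

  // Slide src up by offset elements into dst. Destination elements
  // 0 < i < offset are left undisturbed, so dst must be an input operand
  // even for the unmasked intrinsic. With this patch the call lowers to
  //   @llvm.riscv.vslideup.nxv2i32.i64(dst, src, offset, vl, i64 0)
  // where the final i64 0 is the new policy operand.
  vint32m1_t slideup_example(vint32m1_t dst, vint32m1_t src,
                             size_t offset, size_t vl) {
    return vslideup_vx_i32m1(dst, src, offset, vl);
  }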
Reviewed By: rogfer01, craig.topper
Differential Revision: https://reviews.llvm.org/D124186
let HasMaskedOffOperand = false;
}
-let HasMaskedOffOperand = false in {
+let UnMaskedPolicy = HasPolicyOperand,
+ HasMaskedOffOperand = false in {
multiclass RVVSlideBuiltinSet {
defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
[["vx","v", "vvvz"]]>;
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
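For reference, the updated CHECK lines above only change the lowered call: the C-level builtin keeps its existing signature, and Clang now appends a trailing i64 policy operand (0 for these unmasked forms) to the llvm.riscv.vslidedown/vslideup intrinsic. A minimal caller sketch, assuming the non-prefixed intrinsic naming these tests use; the helper name shift_down and the specific type chosen here are illustrative, not part of the patch:

#include <riscv_vector.h>

// Illustrative only: the unmasked builtin still takes the destination as its
// first argument, and with this patch its lowering gains a trailing policy
// operand, e.g.
//   call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(dst, src, offset, vl, i64 0)
vint8mf8_t shift_down(vint8mf8_t dst, vint8mf8_t src, size_t offset, size_t vl) {
  return vslidedown_vx_i8mf8(dst, src, offset, vl);
}
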
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vslideup_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vslideup_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vslideup_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vslideup_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vslideup_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
// CHECK-RV64-LABEL: @test_vslideup_vx_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vslideup_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
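To make the interface change concrete, here is a minimal hand-written IR sketch of the updated unmasked intrinsic, mirroring the CHECK lines above. The function name and the reading of the trailing 0 as the tail-undisturbed policy are my own illustration, not part of the patch.

declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(
  <vscale x 1 x i8>,   ; dest, still required as input for the undisturbed elements
  <vscale x 1 x i8>,   ; source
  i64,                 ; offset
  i64,                 ; vl
  i64)                 ; policy, the newly added operand (must be an immediate)

define <vscale x 1 x i8> @slidedown_sketch(<vscale x 1 x i8> %dst, <vscale x 1 x i8> %src,
                                           i64 %off, i64 %vl) {
  ; The trailing i64 0 is what the frontend now appends for the unmasked
  ; builtins, keeping the previous tail-undisturbed behaviour.
  %r = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(
      <vscale x 1 x i8> %dst, <vscale x 1 x i8> %src, i64 %off, i64 %vl, i64 0)
  ret <vscale x 1 x i8> %r
}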
[ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
let VLOperand = 4;
}
- // Input: (vector_in, vector_in, vector_in/scalar_in, vl)
- class RISCVTernaryAAAXUnMasked
+ // Input: (vector_in, vector_in, scalar_in, vl, policy)
+ class RVVSlideUnMasked
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
- LLVMMatchType<1>],
- [IntrNoMem]>, RISCVVIntrinsic {
+ LLVMMatchType<1>, LLVMMatchType<1>],
+ [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
// Input: (vector_in, vector_in, vector_in/scalar_in, mask, vl, policy)
- class RISCVTernaryAAAXMasked
+ class RVVSlideMasked
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMasked;
def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMasked;
}
- multiclass RISCVTernaryAAAX {
- def "int_riscv_" # NAME : RISCVTernaryAAAXUnMasked;
- def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAAXMasked;
+ multiclass RVVSlide {
+ def "int_riscv_" # NAME : RVVSlideUnMasked;
+ def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
}
multiclass RISCVTernaryAAXA {
def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
defm vfmerge : RISCVBinaryWithV0;
- defm vslideup : RISCVTernaryAAAX;
- defm vslidedown : RISCVTernaryAAAX;
+ defm vslideup : RVVSlide;
+ defm vslidedown : RVVSlide;
defm vslide1up : RISCVBinaryAAX;
defm vslide1down : RISCVBinaryAAX;
}
}
-multiclass VPseudoTernaryV_VX<string Constraint = ""> {
+multiclass VPseudoVSLDV_VX<string Constraint = ""> {
foreach m = MxList in
- defm _VX : VPseudoTernaryNoMaskNoPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
+ defm _VX : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
m.vrclass, m, constraint>;
}
-multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, string Constraint = ""> {
foreach m = MxList in
- defm _VI : VPseudoTernaryNoMaskNoPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+ defm _VI : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
}
multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
- defm "" : VPseudoTernaryV_VX<Constraint>,
+ defm "" : VPseudoVSLDV_VX<Constraint>,
Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideV, ReadVISlideX, ReadVMask]>;
- defm "" : VPseudoTernaryV_VI<ImmType, Constraint>,
+ defm "" : VPseudoVSLDV_VI<ImmType, Constraint>,
Sched<[WriteVISlideI, ReadVISlideV, ReadVISlideV, ReadVMask]>;
}
multiclass VPatTernaryV_VX<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in
- defm : VPatTernaryNoMaskNoPolicy<intrinsic, instruction, "VX",
- vti.Vector, vti.Vector, XLenVT, vti.Mask,
- vti.Log2SEW, vti.LMul, vti.RegClass,
- vti.RegClass, GPR>;
+ defm : VPatTernaryWithPolicy<intrinsic, instruction, "VX",
+ vti.Vector, vti.Vector, XLenVT, vti.Mask,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, GPR>;
}
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
multiclass VPatTernaryV_VI<string intrinsic, string instruction,
list<VTypeInfo> vtilist, Operand Imm_type> {
foreach vti = vtilist in
- defm : VPatTernaryNoMaskNoPolicy<intrinsic, instruction, "VI",
- vti.Vector, vti.Vector, XLenVT, vti.Mask,
- vti.Log2SEW, vti.LMul, vti.RegClass,
- vti.RegClass, Imm_type>;
+ defm : VPatTernaryWithPolicy<intrinsic, instruction, "VI",
+ vti.Vector, vti.Vector, XLenVT, vti.Mask,
+ vti.Log2SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, Imm_type>;
}
multiclass VPatTernaryW_VV<string intrinsic, string instruction,
: VPatTernaryV_VX<intrinsic, instruction, vtilist>,
VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
+
multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
list<VTypeInfo> vtilist>
: VPatBinaryM_VV<intrinsic, instruction, vtilist>,
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.Log2SEW)>;
+ GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.Log2SEW)>;
+ GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.Log2SEW)>;
+ GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
VLOpFrag)),
(!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.Log2SEW)>;
+ GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
} // Predicates = [HasVInstructions]
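The pattern change above is what flips the fixed-vector codegen checks below from tu to ta: whole-element rotates are selected as a vslidedown/vslideup pair, and the slide patterns now attach TAIL_AGNOSTIC. A minimal IR reproducer, using the same shuffle mask as one of the tests further down (the function name is illustrative):

define <8 x float> @rotate_left_by_one(<8 x float> %x) {
  ; Lowered to a vslidedown.vi/vslideup.vi pair; with this patch the vsetvli
  ; before the vslideup requests ta (tail agnostic) instead of tu.
  %s = shufflevector <8 x float> %x, <8 x float> poison,
                     <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
  ret <8 x float> %s
}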
; CHECK-NEXT: $x5 = LD $x2, 0 :: (load (s64) from %stack.16)
; CHECK-NEXT: renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8)
; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.15)
- ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1, implicit $vl, implicit $vtype
; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: BLT killed renamable $x16, renamable $x27, %bb.2
; CHECK-NEXT: {{ $}}
dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype
renamable $x13 = nsw ADDI renamable $x16, -2
renamable $v0 = PseudoVRELOAD_M1 %stack.1 :: (load unknown-size from %stack.1, align 8)
- renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, implicit $vl, implicit $vtype
+ renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, 1, implicit $vl, implicit $vtype
renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3, implicit $vl, implicit $vtype
BLT killed renamable $x16, renamable $x27, %bb.2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v14, v10, a0
; CHECK-NEXT: vslidedown.vx v12, v9, a0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v13, v14, 0
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v12, v10, a0
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
; LMULMAX8-NEXT: vmerge.vim v16, v16, 1, v0
; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX8-NEXT: vmv.v.i v17, 0
-; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX8-NEXT: vslideup.vi v17, v16, 0
; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX8-NEXT: vmsne.vi v16, v17, 0
; LMULMAX4-NEXT: vmerge.vim v12, v12, 1, v0
; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX4-NEXT: vmv.v.i v13, 0
-; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX4-NEXT: vslideup.vi v13, v12, 0
; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX4-NEXT: vmsne.vi v12, v13, 0
; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v11, 0
-; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX2-NEXT: vslideup.vi v11, v10, 0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmsne.vi v10, v11, 0
; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmv.v.i v10, 0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v9, v10, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v10, (a1)
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v12
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 10
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v14
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 14
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX4-NEXT: vncvt.x.x.w v8, v14
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu
+; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vncvt.x.x.w v10, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v14
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 6
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX4-NEXT: vncvt.x.x.w v14, v8
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v8, v14
-; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu
+; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vncvt.x.x.w v10, v10
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v13
; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v11, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v15
; LMULMAX1-NEXT: vncvt.x.x.w v12, v14
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v12, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 48
; LMULMAX1-NEXT: vse32.v v12, (a1)
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX4-NEXT: vncvt.x.x.w v16, v12
; LMULMAX4-NEXT: vncvt.x.x.w v12, v8
-; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu
+; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vslideup.vi v12, v16, 8
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: ret
; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v9, 0
-; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; LMULMAX2-NEXT: vslideup.vi v9, v8, 0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmsne.vi v8, v9, 0
; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v8, v9, 0
; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v9, 0
-; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; LMULMAX2-NEXT: vslideup.vi v9, v8, 0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmsne.vi v8, v9, 0
; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v8, v9, 0
; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v9, 0
-; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; LMULMAX2-NEXT: vslideup.vi v9, v8, 0
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vmsne.vi v8, v9, 0
; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
-; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: addi a1, a1, %lo(.LCPI4_0)
; CHECK-NEXT: vlse32.v v8, (a1), zero
; CHECK-NEXT: vmv.s.x v9, zero
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v8, zero
; CHECK-NEXT: vfmv.v.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v8, ft0
; CHECK-NEXT: vfmv.v.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vse32.v v9, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v10, v8
; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 4
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: ret
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v10, v8, 1
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
ret <8 x float> %s
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v12, v8, 6
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v12, v8, 2
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%s = shufflevector <8 x double> %x, <8 x double> poison, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
ret <8 x double> %s
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: ret
%s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v12, v12, 5
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v12, v8, 3
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%s = shufflevector <8 x double> %x, <8 x double> %y, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
ret <8 x double> %s
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
; LMULMAX1-NEXT: ret
%z = fptosi <8 x float> %x to <8 x i1>
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
; LMULMAX1-NEXT: ret
%z = fptoui <8 x float> %x to <8 x i1>
; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v11, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v9
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1-NEXT: vncvt.x.x.w v8, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vncvt.x.x.w v11, v12
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v11, v11
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v9
; LMULMAX1-NEXT: vncvt.x.x.w v9, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v9, v8
; LMULMAX1-NEXT: vncvt.x.x.w v8, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v8, v8
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v12, v13, 2
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v12, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
; LMULMAX1-NEXT: ret
%z = fptosi <8 x double> %x to <8 x i1>
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vmv.v.i v9, 0
; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v12, v13, 2
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v12, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v12, 0
; LMULMAX1-NEXT: vmsne.vi v0, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
-; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmsne.vi v0, v8, 0
; LMULMAX1-NEXT: ret
%z = fptoui <8 x double> %x to <8 x i1>
; LMULMAX1-NEXT: vfncvt.f.x.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.f.x.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.f.x.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: ret
; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12
-; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.f.xu.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v9, v11
-; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.f.xu.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: ret
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: addi a0, a1, 1
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vslideup.vx v9, v8, a1
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT: vand.vi v8, v9, 1
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
-; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; RV64-NEXT: vslideup.vx v9, v8, a0
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: addi a0, a1, 1
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vslideup.vx v9, v8, a1
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV32-NEXT: vand.vi v8, v9, 1
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
-; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; RV64-NEXT: vslideup.vx v9, v8, a0
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: addi a0, a1, 1
-; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vx v9, v8, a1
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vand.vi v8, v9, 1
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
-; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vx v9, v8, a0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vsetivli zero, 2, e8, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v12, v8, 1
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 1
; RV32-NEXT: vmv.v.i v12, 0
; RV32-NEXT: vmerge.vim v12, v12, 1, v0
; RV32-NEXT: addi a0, a1, 1
-; RV32-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; RV32-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; RV32-NEXT: vslideup.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV32-NEXT: vand.vi v8, v12, 1
; RV64-NEXT: vmerge.vim v12, v12, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
-; RV64-NEXT: vsetvli zero, a1, e8, m4, tu, mu
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; RV64-NEXT: vslideup.vx v12, v8, a0
; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV64-NEXT: vand.vi v8, v12, 1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 0
; CHECK-NEXT: ret
%sv = load <2 x i32>, <2 x i32>* %svp
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 2
; CHECK-NEXT: ret
%sv = load <2 x i32>, <2 x i32>* %svp
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vle32.v v12, (a0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 6
; CHECK-NEXT: ret
%sv = load <2 x i32>, <2 x i32>* %svp
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v12, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, ta, mu
; LMULMAX2-NEXT: vslideup.vi v8, v12, 0
; LMULMAX2-NEXT: ret
;
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v12, 0
-; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v16, 4
; LMULMAX1-NEXT: ret
%sv = load <8 x i32>, <8 x i32>* %svp
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v12, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX2-NEXT: vslideup.vi v8, v12, 8
; LMULMAX2-NEXT: ret
;
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v16, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v12, 8
-; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v16, 12
; LMULMAX1-NEXT: ret
%sv = load <8 x i32>, <8 x i32>* %svp
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: ret
; LMULMAX2-NEXT: vle32.v v8, (a1)
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, ta, mu
; LMULMAX2-NEXT: vslideup.vi v10, v8, 0
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vse32.v v10, (a0)
; LMULMAX1-NEXT: vle32.v v8, (a1)
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vse32.v v9, (a0)
; LMULMAX2-NEXT: vle32.v v8, (a1)
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, ta, mu
; LMULMAX2-NEXT: vslideup.vi v10, v8, 2
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vse32.v v10, (a0)
; LMULMAX1-NEXT: vle32.v v8, (a1)
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
; LMULMAX1-NEXT: vse32.v v9, (a0)
; LMULMAX1-NEXT: ret
; LMULMAX2-NEXT: vle32.v v8, (a1)
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v10, (a0)
-; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, mu
; LMULMAX2-NEXT: vslideup.vi v10, v8, 6
; LMULMAX2-NEXT: vse32.v v10, (a0)
; LMULMAX2-NEXT: ret
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vle32.v v9, (a0)
-; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
; LMULMAX1-NEXT: vse32.v v9, (a0)
; LMULMAX1-NEXT: ret
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
; LMULMAX2-NEXT: vlm.v v8, (a0)
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vlm.v v9, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu
; LMULMAX2-NEXT: vslideup.vi v8, v9, 0
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vsm.v v8, (a0)
; LMULMAX1-NEXT: vlm.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vlm.v v9, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vsm.v v8, (a0)
; LMULMAX2-NEXT: vlm.v v8, (a0)
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vlm.v v9, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, mu
+; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, ta, mu
; LMULMAX2-NEXT: vslideup.vi v8, v9, 2
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vsm.v v8, (a0)
; LMULMAX1-NEXT: vlm.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vlm.v v9, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 4
-; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: ret
%sv = load <2 x i16>, <2 x i16>* %svp
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 6, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 6, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%sv = load <2 x i16>, <2 x i16>* %svp
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v0, v8, 0
; CHECK-NEXT: ret
%sv = load <8 x i1>, <8 x i1>* %svp
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v0, v8, 2
; CHECK-NEXT: ret
%sv = load <8 x i1>, <8 x i1>* %svp
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v16, (a1)
-; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 6, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 4
; CHECK-NEXT: vs8r.v v8, (a2)
; CHECK-NEXT: ret
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vslide1up.vx v12, v10, a2
; RV32-NEXT: vslide1up.vx v10, v12, a1
-; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vslideup.vi v8, v10, 3
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v10, a1
-; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV64-NEXT: vslideup.vi v8, v10, 3
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
; RV32-NEXT: vlse32.v v10, (a4), zero
; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv.s.x v10, a3
-; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV32-NEXT: vslideup.vi v8, v10, 2
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vslide1up.vx v12, v10, a2
; RV32-NEXT: vslide1up.vx v10, v12, a1
-; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, mu
+; RV32-NEXT: vsetivli zero, 3, e64, m2, ta, mu
; RV32-NEXT: vslideup.vi v8, v10, 2
; RV32-NEXT: sw a1, 16(a0)
; RV32-NEXT: sw a2, 20(a0)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vsetivli zero, 15, e8, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 15, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 14
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vse8.v v8, (a0)
; RV32-NEXT: vle16.v v8, (a0)
; RV32-NEXT: vmv.s.x v12, a1
; RV32-NEXT: addi a1, a2, 1
-; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; RV32-NEXT: vslideup.vx v8, v12, a2
; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV32-NEXT: vse16.v v8, (a0)
; RV64-NEXT: vmv.s.x v12, a1
; RV64-NEXT: sext.w a1, a2
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, mu
+; RV64-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV64-NEXT: vse16.v v8, (a0)
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vfmv.s.f v10, fa0
; RV32-NEXT: addi a2, a1, 1
-; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu
+; RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; RV32-NEXT: vslideup.vx v8, v10, a1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT: vse32.v v8, (a0)
; RV64-NEXT: vfmv.s.f v10, fa0
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e32, m2, tu, mu
+; RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; RV64-NEXT: vslideup.vx v8, v10, a1
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT: vse32.v v8, (a0)
; RV32-NEXT: li a2, -1
; RV32-NEXT: vmv.s.x v12, a2
; RV32-NEXT: addi a2, a1, 1
-; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vslideup.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vse64.v v8, (a0)
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vse64.v v8, (a0)
; RV32-NEXT: li a2, 6
; RV32-NEXT: vmv.s.x v12, a2
; RV32-NEXT: addi a2, a1, 1
-; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vslideup.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vse64.v v8, (a0)
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
-; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vse64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vmv.v.i v8, 0
-; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; RV32-NEXT: vslideup.vi v8, v9, 2
; RV32-NEXT: lui a0, %hi(.LCPI12_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI12_0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.s.x v9, a0
; RV32-NEXT: vmv.v.i v8, 0
-; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; RV32-NEXT: vslideup.vi v8, v9, 2
; RV32-NEXT: lui a0, %hi(.LCPI13_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI13_0)
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v8, zero
; CHECK-NEXT: vmv.v.i v9, 8
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 3
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vse16.v v9, (a0)
; RV32-NEXT: vse32.v v8, (a3)
; RV32-NEXT: vse32.v v8, (a4)
; RV32-NEXT: vmv.s.x v8, zero
-; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; RV32-NEXT: vslideup.vi v9, v8, 1
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vse32.v v9, (a5)
; RV32-NEXT: li a0, 1
; RV32-NEXT: vmv.s.x v8, a0
; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vslideup.vi v9, v8, 3
; RV32-NEXT: vse32.v v9, (a6)
; RV32-NEXT: ret
; RV64-NEXT: vse32.v v8, (a3)
; RV64-NEXT: vse32.v v8, (a4)
; RV64-NEXT: vmv.s.x v8, zero
-; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; RV64-NEXT: vslideup.vi v9, v8, 1
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT: vse32.v v9, (a5)
; RV64-NEXT: li a0, 1
; RV64-NEXT: vmv.s.x v8, a0
; RV64-NEXT: vmv.v.i v9, 0
-; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV64-NEXT: vslideup.vi v9, v8, 3
; RV64-NEXT: vse32.v v9, (a6)
; RV64-NEXT: ret
; CHECK-NEXT: li a0, 3
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v10, 4
-; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vse16.v v10, (a5)
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v9, v8, 3
; CHECK-NEXT: vse16.v v9, (a6)
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vsetivli zero, 7, e8, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 7, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 6
; CHECK-NEXT: ret
ret <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 3, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 0>
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vncvt.x.x.w v9, v9
-; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
+; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a1)
; LMULMAX1-NEXT: ret
; RV32-NEXT: li a0, 5
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; RV32-NEXT: vmv.s.x v16, a0
-; RV32-NEXT: vmv.v.i v20, 2
-; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu
-; RV32-NEXT: vslideup.vi v20, v16, 7
; RV32-NEXT: lui a0, %hi(.LCPI11_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0)
-; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; RV32-NEXT: vle16.v v21, (a0)
+; RV32-NEXT: vle16.v v20, (a0)
+; RV32-NEXT: vmv.v.i v21, 2
+; RV32-NEXT: vslideup.vi v21, v16, 7
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT: vrgatherei16.vv v16, v8, v21
+; RV32-NEXT: vrgatherei16.vv v16, v8, v20
; RV32-NEXT: li a0, 164
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV32-NEXT: vrgatherei16.vv v16, v12, v20, v0.t
+; RV32-NEXT: vrgatherei16.vv v16, v12, v21, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_shuffle_vv_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: li a0, 5
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vmv.s.x v16, a0
-; RV64-NEXT: vmv.v.i v20, 2
-; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu
-; RV64-NEXT: vslideup.vi v20, v16, 7
; RV64-NEXT: lui a0, %hi(.LCPI11_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI11_0)
-; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; RV64-NEXT: vle64.v v24, (a0)
-; RV64-NEXT: vrgather.vv v16, v8, v24
+; RV64-NEXT: vle64.v v20, (a0)
+; RV64-NEXT: li a0, 5
+; RV64-NEXT: vmv.s.x v16, a0
+; RV64-NEXT: vmv.v.i v24, 2
+; RV64-NEXT: vslideup.vi v24, v16, 7
+; RV64-NEXT: vrgather.vv v16, v8, v20
; RV64-NEXT: li a0, 164
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vrgather.vv v16, v12, v20, v0.t
+; RV64-NEXT: vrgather.vv v16, v12, v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> <i32 1, i32 2, i32 10, i32 5, i32 1, i32 10, i32 3, i32 13>
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vmv.v.i v10, 4
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v10, v9, 1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vrgather.vv v9, v8, v10
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vmv.v.i v11, 0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v11, v10, 2
; CHECK-NEXT: li a0, 70
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vrgather.vi v10, v8, 2
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.s.x v10, a0
; RV32-NEXT: vmv.v.i v11, 0
-; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; RV32-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vi v11, v10, 5
; RV32-NEXT: lui a0, 8256
; RV32-NEXT: addi a0, a0, 2
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.s.x v10, a0
; RV64-NEXT: vmv.v.i v11, 0
-; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
+; RV64-NEXT: vsetivli zero, 6, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vi v11, v10, 5
; RV64-NEXT: lui a0, 8256
; RV64-NEXT: addiw a0, a0, 2
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vrgather.vi v8, v9, 3
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 6
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%s = shufflevector <8 x i16> %x, <8 x i16> poison, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
ret <8 x i16> %s
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v10, v8, 5
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v10, v8, 3
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> <i32 undef, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
ret <8 x i32> %s
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 6
; CHECK-NEXT: ret
%s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 5
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v10, 3
; CHECK-NEXT: vmerge.vim v10, v10, 2, v0
-; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 7, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v10, v9, 6
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v11, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmv.s.x v12, a1
-; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 7, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v11, v9, 6
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: lui a1, %hi(.LCPI53_0)
; CHECK-NEXT: lui a1, 524288
; CHECK-NEXT: vmv.s.x v9, a1
; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v10, v9, 2
; CHECK-NEXT: lui a1, %hi(.LCPI54_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI54_0)
; CHECK-NEXT: li a1, 1
; CHECK-NEXT: vmv.s.x v9, a1
; CHECK-NEXT: vmv.v.i v10, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vslideup.vi v10, v9, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsrl.vv v8, v8, v10
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vmv.s.x v8, a1
; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; RV32-NEXT: vslideup.vi v9, v8, 2
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsra.vv v8, v10, v9
; LMULMAX1-RV32-NEXT: lui a2, 524288
; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2
; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0
-; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI131_0)
; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI131_0)
; LMULMAX1-RV32-NEXT: li a2, 1
; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2
; LMULMAX1-RV32-NEXT: vmv.v.i v13, 2
-; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; LMULMAX1-RV32-NEXT: vslideup.vi v13, v12, 3
-; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v13
; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v8, v10
; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vmv.s.x v12, a1
; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0
-; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, tu, mu
+; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vslideup.vi v14, v12, 5
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v14
; LMULMAX2-RV64-NEXT: slli a1, a1, 63
; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1
; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0
-; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, mu
+; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, ta, mu
; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2
; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI132_0)
; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI132_0)
; RV32-LMULMAX4-NEXT: lui a0, 748384
; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX4-NEXT: ret
;
; RV32-LMULMAX8-NEXT: lui a0, 748384
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX8-NEXT: ret
;
; RV32-LMULMAX4-NEXT: lui a0, 748384
; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX4-NEXT: lui a0, 945060
; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-LMULMAX4-NEXT: vmv.s.x v9, a0
; RV32-LMULMAX4-NEXT: lui a0, 551776
; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX4-NEXT: vslideup.vi v8, v9, 1
; RV32-LMULMAX4-NEXT: ret
;
; RV32-LMULMAX8-NEXT: lui a0, 748384
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, mu
+; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX8-NEXT: lui a0, 551776
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
-; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, tu, mu
+; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 2
; RV32-LMULMAX8-NEXT: lui a0, 945060
; RV32-LMULMAX8-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
-; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 3
; RV32-LMULMAX8-NEXT: ret
;
; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV64-LMULMAX8-NEXT: vmv.s.x v0, a1
-; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV64-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV64-LMULMAX8-NEXT: ret
ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v8, v9, 0
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: li a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; RV64-NEXT: vslideup.vi v12, v10, 16
-; RV64-NEXT: vmv2r.v v8, v12
+; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs
%v = call <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*> %ptrs, i32 2, <32 x i1> %m, <32 x i8> %passthru)
; RV32-NEXT: vmerge.vim v10, v10, 1, v0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.v.i v11, 0
-; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vi v11, v10, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmsne.vi v10, v11, 0
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV32-NEXT: vmv.s.x v8, a0
-; RV32-NEXT: vsetvli zero, zero, e16, mf4, tu, mu
; RV32-NEXT: vslideup.vi v9, v8, 1
; RV32-NEXT: .LBB4_4: # %else2
; RV32-NEXT: vmv1r.v v8, v9
; RV64-NEXT: vmerge.vim v10, v10, 1, v0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.v.i v11, 0
-; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vi v11, v10, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmsne.vi v10, v11, 0
; RV64-NEXT: or a0, a1, a0
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV64-NEXT: vmv.s.x v8, a0
-; RV64-NEXT: vsetvli zero, zero, e16, mf4, tu, mu
; RV64-NEXT: vslideup.vi v9, v8, 1
; RV64-NEXT: .LBB4_4: # %else2
; RV64-NEXT: vmv1r.v v8, v9
; RV32-NEXT: vmerge.vim v10, v10, 1, v0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.v.i v11, 0
-; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vi v11, v10, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmsne.vi v10, v11, 0
; RV32-NEXT: lw a1, 0(a1)
; RV32-NEXT: vslide1up.vx v11, v10, a2
; RV32-NEXT: vslide1up.vx v12, v11, a1
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT: vslideup.vi v9, v12, 0
; RV32-NEXT: .LBB5_2: # %else
; RV32-NEXT: andi a0, a0, 2
; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; RV32-NEXT: vslide1up.vx v8, v10, a1
; RV32-NEXT: vslide1up.vx v10, v8, a0
-; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vslideup.vi v9, v10, 1
; RV32-NEXT: .LBB5_4: # %else2
; RV32-NEXT: vmv1r.v v8, v9
; RV64-NEXT: vmerge.vim v10, v10, 1, v0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.v.i v11, 0
-; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vi v11, v10, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmsne.vi v10, v11, 0
; RV64-NEXT: slli a1, a1, 32
; RV64-NEXT: or a0, a1, a0
; RV64-NEXT: vmv.s.x v8, a0
-; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vslideup.vi v9, v8, 1
; RV64-NEXT: .LBB5_4: # %else2
; RV64-NEXT: vmv1r.v v8, v9
; RV32-NEXT: vmerge.vim v10, v10, 1, v0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.v.i v11, 0
-; RV32-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vi v11, v10, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmsne.vi v10, v11, 0
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.v.i v12, 0
-; RV64-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vi v12, v9, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmsne.vi v9, v12, 0
; RV32-NEXT: vmerge.vim v10, v10, 1, v0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.v.i v11, 0
-; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vi v11, v10, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmsne.vi v10, v11, 0
; RV64-NEXT: vmerge.vim v10, v10, 1, v0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.v.i v11, 0
-; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vi v11, v10, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmsne.vi v10, v11, 0
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV32-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV32-NEXT: vslideup.vi v9, v8, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmsne.vi v8, v9, 0
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: or a0, a0, a2
; RV32-NEXT: vmv.s.x v9, a0
-; RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-NEXT: vslideup.vi v8, v9, 1
; RV32-NEXT: .LBB8_4: # %else2
-; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.v.i v9, 0
-; RV64-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; RV64-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; RV64-NEXT: vslideup.vi v9, v8, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmsne.vi v8, v9, 0
; RV64-NEXT: slli a0, a0, 16
; RV64-NEXT: or a0, a0, a2
; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV64-NEXT: vslideup.vi v8, v9, 1
; RV64-NEXT: .LBB8_4: # %else2
-; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV64-NEXT: vse32.v v8, (a1)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v10, v9, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v9, v10, 0
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: li a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; RV64-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; RV64-NEXT: vslideup.vi v8, v12, 16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v11, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 6)
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 1)
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 3
; CHECK-NEXT: sub a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v9, v10, a0
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v14, v16, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v0, v8, 0
; CHECK-NEXT: ret
%vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: add a1, a0, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v0, v8, a0
; CHECK-NEXT: ret
%vec = call <vscale x 32 x i1> @llvm.experimental.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 8)
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v9, v8, a0
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v9, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vi v10, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 1
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vand.vi v8, v10, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v12, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v12, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT: vsetivli zero, 3, e8, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 3, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vi v16, v8, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v16, v8, a1
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vslide1up.vx v10, v9, a1
; CHECK-NEXT: vslide1up.vx v9, v10, a0
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vslide1up.vx v10, v9, a1
; CHECK-NEXT: vslide1up.vx v9, v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vslide1up.vx v10, v9, a1
; CHECK-NEXT: vslide1up.vx v9, v10, a0
; CHECK-NEXT: addi a0, a2, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a2
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vslide1up.vx v12, v10, a1
; CHECK-NEXT: vslide1up.vx v10, v12, a0
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vslide1up.vx v12, v10, a1
; CHECK-NEXT: vslide1up.vx v10, v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vslide1up.vx v12, v10, a1
; CHECK-NEXT: vslide1up.vx v10, v12, a0
; CHECK-NEXT: addi a0, a2, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a2
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vslide1up.vx v16, v12, a1
; CHECK-NEXT: vslide1up.vx v12, v16, a0
-; CHECK-NEXT: vsetivli zero, 1, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vslide1up.vx v16, v12, a1
; CHECK-NEXT: vslide1up.vx v12, v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vslide1up.vx v16, v12, a1
; CHECK-NEXT: vslide1up.vx v12, v16, a0
; CHECK-NEXT: addi a0, a2, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a2
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vslide1up.vx v24, v16, a1
; CHECK-NEXT: vslide1up.vx v16, v24, a0
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vslide1up.vx v24, v16, a1
; CHECK-NEXT: vslide1up.vx v16, v24, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vslide1up.vx v24, v16, a1
; CHECK-NEXT: vslide1up.vx v16, v24, a0
; CHECK-NEXT: addi a0, a2, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a2
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
; CHECK-NEXT: li a0, 10
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 10, i32 3
; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a1
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 10, i32 %idx
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 3
; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a1
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: addi a0, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: sext.w a0, a1
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vmv.s.x v10, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: sext.w a0, a1
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vmv.s.x v12, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: sext.w a0, a1
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vmv.s.x v16, a0
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: sext.w a0, a1
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
<vscale x 1 x i1>,
iXLen, iXLen);
-define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vslideup.vx v9, v8, a0, v0.t
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
- <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
- iXLen %1,
- <vscale x 1 x i1> %2,
- iXLen %3, iXLen 3)
+ <vscale x 1 x i8> %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 3)
ret <vscale x 1 x i8> %a
}
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: add a1, a0, a0
-; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; RV32-NEXT: vslideup.vx v0, v24, a0
; RV32-NEXT: ret
;
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: add a1, a0, a0
-; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; RV64-NEXT: vslideup.vx v0, v24, a0
; RV64-NEXT: ret
%vc = fcmp oeq <vscale x 16 x double> %va, zeroinitializer
; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; CHECK-NEXT: vmseq.vi v24, v16, 0
; CHECK-NEXT: vmseq.vi v0, v8, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v0, v24, a0
; CHECK-NEXT: ret
%vc = icmp eq <vscale x 16 x i64> %va, zeroinitializer
<vscale x 1 x i8>,
<vscale x 1 x i8>,
iXLen,
+ iXLen,
iXLen);
define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
iXLen %1,
- iXLen %2)
+ iXLen %2,
+ iXLen 1)
ret <vscale x 1 x i8> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
iXLen,
+ iXLen,
iXLen);
-define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vslideup.vx v9, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
- <vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
- iXLen %1,
- iXLen %2)
+ <vscale x 1 x i8> %1,
+ iXLen %2,
+ iXLen %3,
+ iXLen 1)
ret <vscale x 1 x i8> %a
}
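For reference, a minimal sketch of the call shape exercised by the updated unmasked vslideup test above: the destination vector is now an explicit first operand and a policy operand is appended after vl. The operand names below are placeholders, and the policy value 1 simply mirrors the constant used in the test above.

; @llvm.riscv.vslideup.<type>(dest, src, offset, vl, policy)
%slid = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
    <vscale x 1 x i8> %dest,   ; destination operand, required as input for correctness
    <vscale x 1 x i8> %src,    ; vector being slid up
    iXLen %offset,             ; slide amount
    iXLen %vl,                 ; vector length
    iXLen 1)                   ; new policy operand (value taken from the test above)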
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x i8> @llvm.experimental.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i8> @llvm.experimental.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x i8> @llvm.experimental.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x i8> @llvm.experimental.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x i8> @llvm.experimental.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i8> @llvm.experimental.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x i8> @llvm.experimental.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 15)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a1
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -32)
; CHECK-NEXT: addi a0, a0, -31
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 31
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 16 x i8> @llvm.experimental.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 31)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 32 x i8> @llvm.experimental.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -1)
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%res = call <vscale x 32 x i8> @llvm.experimental.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -64)
; CHECK-NEXT: li a1, 63
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 32 x i8> @llvm.experimental.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 63)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 64 x i8> @llvm.experimental.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -1)
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%res = call <vscale x 64 x i8> @llvm.experimental.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -128)
; CHECK-NEXT: li a1, 127
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 64 x i8> @llvm.experimental.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 127)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i16> @llvm.experimental.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x i16> @llvm.experimental.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x i16> @llvm.experimental.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i16> @llvm.experimental.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x i16> @llvm.experimental.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x i16> @llvm.experimental.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x i16> @llvm.experimental.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 15)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 16 x i16> @llvm.experimental.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -1)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%res = call <vscale x 16 x i16> @llvm.experimental.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -32)
; CHECK-NEXT: addi a0, a0, -31
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 31
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 16 x i16> @llvm.experimental.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 31)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 32 x i16> @llvm.experimental.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -1)
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%res = call <vscale x 32 x i16> @llvm.experimental.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -64)
; CHECK-NEXT: li a1, 63
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 32 x i16> @llvm.experimental.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 63)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i32> @llvm.experimental.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x i32> @llvm.experimental.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x i32> @llvm.experimental.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i32> @llvm.experimental.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x i32> @llvm.experimental.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x i32> @llvm.experimental.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x i32> @llvm.experimental.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 15)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 16 x i32> @llvm.experimental.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -1)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%res = call <vscale x 16 x i32> @llvm.experimental.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -32)
; CHECK-NEXT: addi a0, a0, -31
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 31
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 16 x i32> @llvm.experimental.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 31)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.experimental.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.experimental.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.experimental.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x i64> @llvm.experimental.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x i64> @llvm.experimental.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x i64> @llvm.experimental.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x i64> @llvm.experimental.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 15)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x half> @llvm.experimental.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x half> @llvm.experimental.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x half> @llvm.experimental.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x half> @llvm.experimental.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x half> @llvm.experimental.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x half> @llvm.experimental.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 15)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 16 x half> @llvm.experimental.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -1)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a1
; CHECK-NEXT: ret
%res = call <vscale x 16 x half> @llvm.experimental.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -32)
; CHECK-NEXT: addi a0, a0, -31
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 31
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 16 x half> @llvm.experimental.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 31)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 32 x half> @llvm.experimental.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -1)
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%res = call <vscale x 32 x half> @llvm.experimental.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -64)
; CHECK-NEXT: li a1, 63
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a1
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 32 x half> @llvm.experimental.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 63)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x float> @llvm.experimental.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x float> @llvm.experimental.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x float> @llvm.experimental.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x float> @llvm.experimental.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x float> @llvm.experimental.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x float> @llvm.experimental.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x float> @llvm.experimental.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x float> @llvm.experimental.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 15)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -1)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a1
; CHECK-NEXT: ret
%res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -32)
; CHECK-NEXT: addi a0, a0, -31
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 31
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 16 x float> @llvm.experimental.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 31)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x double> @llvm.experimental.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
%res = call <vscale x 1 x double> @llvm.experimental.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -2)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
%res = call <vscale x 1 x double> @llvm.experimental.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 1)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -4
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: ret
%res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -4)
; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 2 x double> @llvm.experimental.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 3)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -8
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 8
; CHECK-NEXT: ret
%res = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -8)
; CHECK-NEXT: addi a0, a0, -7
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 7
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
%res = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 7)
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x double> @llvm.experimental.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -1)
; CHECK-NEXT: addi a0, a0, -16
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v16, 16
; CHECK-NEXT: ret
%res = call <vscale x 8 x double> @llvm.experimental.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -16)
; CHECK-NEXT: addi a0, a0, -15
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v8, 15
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v16, a0
; CHECK-NEXT: ret
%res = call <vscale x 8 x double> @llvm.experimental.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 15)
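
The vector.splice hunks above only touch the second vsetvli of each lowering: the vslideup step flips from tu to ta. Since that slideup runs with AVL x0 (VLMAX), there is no tail left to preserve, so the agnostic policy looks safe here; that reasoning is mine and is not spelled out in the patch. For reference, a minimal standalone sketch of the generic intrinsic these tests exercise, which the expected code implements as a vslidedown of %a followed by a vslideup of %b into the same register:

; Standalone sketch, not part of the test files. With offset -1 the result is
; the last element of %a followed by the leading elements of %b.
declare <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

define <vscale x 1 x i8> @splice_sketch(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
  %res = call <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(
      <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -1)
  ret <vscale x 1 x i8> %res
}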
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i8> %a
}
define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 16 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 32 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i16> %a
}
define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x i16> %a
}
define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 16 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x half>,
<vscale x 1 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x half> %a
}
define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x half> %a
}
<vscale x 2 x half>,
<vscale x 2 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x half> %a
}
define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x half> %a
}
<vscale x 4 x half>,
<vscale x 4 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x half> %a
}
define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x half> %a
}
<vscale x 8 x half>,
<vscale x 8 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x half> %a
}
define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x half> %a
}
<vscale x 16 x half>,
<vscale x 16 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 16 x half> %a
}
define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 16 x half> %a
}
<vscale x 1 x float>,
<vscale x 1 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x float> %a
}
define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x float> %a
}
<vscale x 2 x float>,
<vscale x 2 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x float> %a
}
define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x float> %a
}
<vscale x 4 x float>,
<vscale x 4 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x float> %a
}
define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x float> %a
}
<vscale x 8 x float>,
<vscale x 8 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x float> %a
}
define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x float> %a
}
<vscale x 1 x double>,
<vscale x 1 x double>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x double> %a
}
define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x double> %a
}
<vscale x 2 x double>,
<vscale x 2 x double>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x double> %a
}
define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x double> %a
}
<vscale x 4 x double>,
<vscale x 4 x double>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x double> %a
}
define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x double> %a
}
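From here the same vslidedown updates repeat for the RV64 run of the test file: identical hunks, but with i64 offset, vl, and policy operands.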
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i8> %a
}
define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 16 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 32 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i16> %a
}
define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x i16> %a
}
define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 16 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x half>,
<vscale x 1 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x half> %a
}
define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x half> %a
}
<vscale x 2 x half>,
<vscale x 2 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x half> %a
}
define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x half> %a
}
<vscale x 4 x half>,
<vscale x 4 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x half> %a
}
define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x half> %a
}
<vscale x 8 x half>,
<vscale x 8 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x half> %a
}
define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x half> %a
}
<vscale x 16 x half>,
<vscale x 16 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 16 x half> %a
}
define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 16 x half> %a
}
<vscale x 1 x float>,
<vscale x 1 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x float> %a
}
define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x float> %a
}
<vscale x 2 x float>,
<vscale x 2 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x float> %a
}
define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x float> %a
}
<vscale x 4 x float>,
<vscale x 4 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x float> %a
}
define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x float> %a
}
<vscale x 8 x float>,
<vscale x 8 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x float> %a
}
define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x float> %a
}
<vscale x 1 x double>,
<vscale x 1 x double>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x double> %a
}
define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x double> %a
}
<vscale x 2 x double>,
<vscale x 2 x double>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x double> %a
}
define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x double> %a
}
<vscale x 4 x double>,
<vscale x 4 x double>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x double> %a
}
define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x double> %a
}
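The vslideup tests below receive the same treatment: each declaration grows a trailing policy operand, the unmasked calls pass 1, and the expected vsetvli switches from tu to ta while the destination operand remains in the call. A matching sketch under the same naming assumption as above, not part of the diff:

; Sketch only: unmasked vslideup call shape after this change, with the new
; trailing policy operand set to 1 (tail agnostic).
declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64, i64)

define <vscale x 1 x i8> @slideup_ta_sketch(<vscale x 1 x i8> %dst, <vscale x 1 x i8> %src, i64 %off, i64 %vl) {
  %r = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(
              <vscale x 1 x i8> %dst, <vscale x 1 x i8> %src, i64 %off, i64 %vl, i64 1)
  ret <vscale x 1 x i8> %r
}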
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i8> %a
}
define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 16 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 32 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i16> %a
}
define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x i16> %a
}
define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 16 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x half>,
<vscale x 1 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x half> %a
}
define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x half> %a
}
<vscale x 2 x half>,
<vscale x 2 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x half> %a
}
define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x half> %a
}
<vscale x 4 x half>,
<vscale x 4 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x half> %a
}
define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x half> %a
}
<vscale x 8 x half>,
<vscale x 8 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x half> %a
}
define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x half> %a
}
<vscale x 16 x half>,
<vscale x 16 x half>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 16 x half> %a
}
define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 16 x half> %a
}
<vscale x 1 x float>,
<vscale x 1 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x float> %a
}
define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x float> %a
}
<vscale x 2 x float>,
<vscale x 2 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x float> %a
}
define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x float> %a
}
<vscale x 4 x float>,
<vscale x 4 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x float> %a
}
define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x float> %a
}
<vscale x 8 x float>,
<vscale x 8 x float>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 8 x float> %a
}
define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 8 x float> %a
}
<vscale x 1 x double>,
<vscale x 1 x double>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 1 x double> %a
}
define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 1 x double> %a
}
<vscale x 2 x double>,
<vscale x 2 x double>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 2 x double> %a
}
define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 2 x double> %a
}
<vscale x 4 x double>,
<vscale x 4 x double>,
i32,
- i32);
+ i32,
+ i32
+);
define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i32 %2,
- i32 %3)
+ i32 %3,
+ i32 1)
ret <vscale x 4 x double> %a
}
define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i32 9,
- i32 %2)
+ i32 %2,
+ i32 1)
ret <vscale x 4 x double> %a
}
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i8> %a
}
define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i8> %a
}
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i8> @intrinsic_vslideup_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vslideup_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i8> %a
}
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i8> @intrinsic_vslideup_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i8> %a
}
define <vscale x 4 x i8> @intrinsic_vslideup_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i8> %a
}
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x i8> @intrinsic_vslideup_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x i8> %a
}
define <vscale x 8 x i8> @intrinsic_vslideup_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x i8> %a
}
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 16 x i8> @intrinsic_vslideup_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 16 x i8> %a
}
define <vscale x 16 x i8> @intrinsic_vslideup_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 16 x i8> %a
}
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 32 x i8> @intrinsic_vslideup_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 32 x i8> %a
}
define <vscale x 32 x i8> @intrinsic_vslideup_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 32 x i8> %a
}
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i16> @intrinsic_vslideup_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i16> %a
}
define <vscale x 1 x i16> @intrinsic_vslideup_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i16> %a
}
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i16> @intrinsic_vslideup_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i16> %a
}
define <vscale x 2 x i16> @intrinsic_vslideup_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i16> %a
}
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i16> @intrinsic_vslideup_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i16> %a
}
define <vscale x 4 x i16> @intrinsic_vslideup_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i16> %a
}
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x i16> @intrinsic_vslideup_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x i16> %a
}
define <vscale x 8 x i16> @intrinsic_vslideup_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x i16> %a
}
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 16 x i16> @intrinsic_vslideup_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 16 x i16> %a
}
define <vscale x 16 x i16> @intrinsic_vslideup_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 16 x i16> %a
}
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i32> @intrinsic_vslideup_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vslideup_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i32> %a
}
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i32> @intrinsic_vslideup_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vslideup_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i32> %a
}
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i32> @intrinsic_vslideup_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vslideup_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i32> %a
}
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x i32> @intrinsic_vslideup_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vslideup_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x i32> %a
}
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x i64> %a
}
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x i64> %a
}
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x i64> %a
}
<vscale x 1 x half>,
<vscale x 1 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x half> %a
}
define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x half> %a
}
<vscale x 2 x half>,
<vscale x 2 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x half> %a
}
define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x half> %a
}
<vscale x 4 x half>,
<vscale x 4 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x half> %a
}
define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x half> %a
}
<vscale x 8 x half>,
<vscale x 8 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x half> %a
}
define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x half> %a
}
<vscale x 16 x half>,
<vscale x 16 x half>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 16 x half> %a
}
define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 16 x half> %a
}
<vscale x 1 x float>,
<vscale x 1 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x float> %a
}
define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x float> %a
}
<vscale x 2 x float>,
<vscale x 2 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x float> %a
}
define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x float> %a
}
<vscale x 4 x float>,
<vscale x 4 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x float> %a
}
define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x float> %a
}
<vscale x 8 x float>,
<vscale x 8 x float>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 8 x float> %a
}
define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 8 x float> %a
}
<vscale x 1 x double>,
<vscale x 1 x double>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 1 x double> %a
}
define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: ret
entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 1 x double> %a
}
<vscale x 2 x double>,
<vscale x 2 x double>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 2 x double> %a
}
define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: ret
entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 2 x double> %a
}
<vscale x 4 x double>,
<vscale x 4 x double>,
i64,
- i64);
+ i64,
+ i64
+);
define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i64 %2,
- i64 %3)
+ i64 %3,
+ i64 1)
ret <vscale x 4 x double> %a
}
define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: ret
entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
i64 9,
- i64 %2)
+ i64 %2,
+ i64 1)
ret <vscale x 4 x double> %a
}
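Note (illustrative, not part of the patch): every hunk above applies the same mechanical update, so one sketch covers them all. Assuming the @llvm.riscv.vslideup.nxv1i8.i64 mangling used by the RV64 tests, an updated declaration and unmasked call site look roughly like the following; the trailing operand is the new policy, and passing 1 (tail agnostic) is what lets the checked vsetvli use "ta" instead of "tu":

; sketch only, hypothetical test function name
declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(
  <vscale x 1 x i8>,   ; dest (tied) operand
  <vscale x 1 x i8>,   ; source vector
  i64,                 ; offset
  i64,                 ; vl
  i64)                 ; policy operand added by this patch

define <vscale x 1 x i8> @slideup_ta_sketch(<vscale x 1 x i8> %dst, <vscale x 1 x i8> %src, i64 %offset, i64 %vl) {
entry:
  ; policy 1 = tail agnostic, mask undisturbed ("ta, mu" in the CHECK lines)
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(
    <vscale x 1 x i8> %dst,
    <vscale x 1 x i8> %src,
    i64 %offset,
    i64 %vl,
    i64 1)
  ret <vscale x 1 x i8> %a
}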