Instructions.
We will switch all UndefValue to PoisonValue in follow-up patches.
Thanks to Kito for helping with verification using their internal test suite.
Reviewed By: kito-cheng
Differential Revision: https://reviews.llvm.org/D126748
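
For reference, a minimal usage sketch of the new policy variants, mirroring the tests below (the overloaded _tu/_ta/_tum/_tam spellings and argument orders are taken from those tests; the passthru-to-undef lowering matches the CHECK lines and will move to poison as noted above):

#include <riscv_vector.h>

// Tail-undisturbed: destination elements not written by the reduction
// are taken from `merge`.
vint32m1_t sum_tu(vint32m1_t merge, vint32mf2_t vec, vint32m1_t init, size_t vl) {
  return vredsum_tu(merge, vec, init, vl);
}

// Tail-agnostic: no passthru operand; currently lowered with an undef merge.
vint32m1_t sum_ta(vint32mf2_t vec, vint32m1_t init, size_t vl) {
  return vredsum_ta(vec, init, vl);
}

// Masked, tail-undisturbed: the mask comes first, then the merge operand.
vint32m1_t sum_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vec,
                   vint32m1_t init, size_t vl) {
  return vredsum_tum(mask, merge, vec, init, vl);
}

// Masked, tail-agnostic: no merge operand.
vint32m1_t sum_tam(vbool64_t mask, vint32mf2_t vec, vint32m1_t init, size_t vl) {
  return vredsum_tam(mask, vec, init, vl);
}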
// 15. Vector Reduction Operations
// 15.1. Vector Single-Width Integer Reduction Instructions
-let MaskedPolicyScheme = NonePolicy,
+let UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = HasPassthruOperand,
IsPrototypeDefaultTU = true,
HasMaskPolicy = false in {
defm vredsum : RVVIntReductionBuiltinSet;
default:
break;
}
+ bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
if (IsMasked) {
// If HasMaskedOffOperand, insert result type as first input operand if
    // needed.
NewPrototype.insert(NewPrototype.begin() + NF + 1, MaskoffType);
}
}
+ // Erase passthru operand for TAM
+ if (NF == 1 && IsPrototypeDefaultTU && DefaultPolicy == Policy::TAMA &&
+ HasPassthruOp && !HasMaskedOffOperand)
+ NewPrototype.erase(NewPrototype.begin() + 1);
if (HasMaskedOffOperand && NF > 1) {
// Convert
// (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
}
} else if (NF == 1) {
- bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
if (DefaultPolicy == Policy::TU && HasPassthruOp && !IsPrototypeDefaultTU)
NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
vfloat64m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_tam(mask, vector, scalar, vl);
+}
vfloat64m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_tam(mask, vector, scalar, vl);
+}
vfloat64m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_tam(mask, vector, scalar, vl);
+}
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu (vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum (vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredand(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredmaxu(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredminu(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_tam(mask, vector, scalar, vl);
+}
size_t vl) {
return vredor(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredsum(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredxor(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vwredsumu(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_tam(mask, vector, scalar, vl);
+}
vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32mf2_f32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl);
+}
vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32mf2_f32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl);
+}
vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32mf2_f32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredusum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32mf2_f32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_ta(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32mf2_f32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t merge, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32mf2_f32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32mf2_f32m1_tam(mask, vector, scalar, vl);
+}
vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
return vfwredusum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_vs_f32mf2_f64m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_vs_f32mf2_f64m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_vs_f32mf2_f64m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredusum_vs_f32mf2_f64m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32mf2_f64m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_ta(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32mf2_f64m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MERGE:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t merge, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32mf2_f64m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tam(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32mf2_f64m1_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredand_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredminu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
size_t vl) {
return vredor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredsum_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32mf2_i32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32mf2_u32m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_ta(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32mf2_i32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_ta(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32mf2_u32m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t merge, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32mf2_i32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t merge, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32mf2_u32m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tam(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32mf2_i32m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32mf2_u32m1_tam(mask, vector, scalar, vl);
+}
vuint64m1_t scalar, size_t vl) {
return vwredsumu_vs_u32m8_u64m1_m(mask, dst, vector, scalar, vl);
}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32mf2_i64m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32mf2_u64m1_tu(merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_ta(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32mf2_i64m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_ta(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32mf2_u64m1_ta(vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t merge, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32mf2_i64m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32mf2_u64m1_tum(mask, merge, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tam(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32mf2_i64m1_tam(mask, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_tam(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tam(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32mf2_u64m1_tam(mask, vector, scalar, vl);
+}
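
The _tam checks above are produced by the RISCVVEmitter change below: when a masked reduction carries a passthru operand but no maskedoff operand and the default policy is TAMA, an undef passthru is inserted at the front of the operand list.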
if (RVVI->hasMaskedOffOperand() &&
RVVI->getDefaultPolicy() == Policy::TAMA)
OS << " Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
+ // Masked reductions: no maskedoff operand, but a passthru operand that must be filled with undef under the TAMA policy.
+ if (!RVVI->hasMaskedOffOperand() && RVVI->hasPassthruOperand() &&
+ RVVI->getDefaultPolicy() == Policy::TAMA)
+ OS << " Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
} else {
OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
}
if (RVVI->hasPolicyOperand())
OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType(), "
"DefaultPolicy));\n";
- else if (RVVI->hasPassthruOperand() && RVVI->getDefaultPolicy() == Policy::TA)
+ else if (RVVI->hasPassthruOperand() &&
+ RVVI->getDefaultPolicy() == Policy::TA)
OS << " Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));\n";
}
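
For orientation, here is a minimal usage sketch of the policy suffixes exercised by the tests above. It is a hypothetical helper, not part of the patch, and assumes a riscv_vector.h that declares these _tu/_tam variants:

#include <riscv_vector.h>

// Hypothetical helper contrasting the _tu and _tam reduction variants.
vint32m1_t reduce_sum_sketch(vbool64_t mask, vint32m1_t dest,
                             vint32mf2_t src, vint32m1_t zero, size_t vl) {
  // _tu: unmasked, takes a merge operand; elements of the result register
  // beyond the reduction value are kept from `dest` (tail undisturbed).
  vint32m1_t tu = vredsum_vs_i32mf2_i32m1_tu(dest, src, zero, vl);
  // _tam: masked, no merge operand; the builtin is lowered with an undef
  // passthru operand, matching the _tam CHECK lines above.
  vint32m1_t tam = vredsum_vs_i32mf2_i32m1_tam(mask, src, zero, vl);
  (void)tu;
  return tam;
}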