From c680b0dabf367046c9acb397cddc5e3a2194bbaa Mon Sep 17 00:00:00 2001
From: Zakk Chen
Date: Tue, 6 Apr 2021 07:57:41 -0700
Subject: [PATCH] [RISCV][Clang] Add all RVV Reduction intrinsic functions.

Authored-by: Roger Ferrer Ibanez
Co-Authored-by: Zakk Chen

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D99964
---
 clang/include/clang/Basic/riscv_vector.td | 53 +
 .../RISCV/rvv-intrinsics-overloaded/vfredmax.c | 291 +++++
 .../RISCV/rvv-intrinsics-overloaded/vfredmin.c | 291 +++++
 .../RISCV/rvv-intrinsics-overloaded/vfredsum.c | 579 +++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwredsum.c | 331 +++++
 .../RISCV/rvv-intrinsics-overloaded/vredand.c | 1372 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vredmax.c | 1372 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vredmin.c | 1372 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vredor.c | 1372 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vredsum.c | 1372 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vredxor.c | 1372 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vwredsum.c | 1124 ++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c | 291 +++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c | 291 +++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c | 579 +++++++++
 .../test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c | 171 +++
 .../test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c | 171 +++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c | 1372 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c | 1372 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c | 1372 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c | 1372 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c | 1372 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c | 1372 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c | 1124 ++++++++++++++++
 24 files changed, 21760 insertions(+)
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 749ffb1..4497d37 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -404,6 +404,29 @@ class RVVConvToNarrowingSignedBuiltin
 class RVVConvToNarrowingUnsignedBuiltin
     : RVVConvBuiltin<"Uv", "UvFw", "si", mangled_name>;
 
+let HasMaskedOffOperand = false in {
+  multiclass RVVSignedReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+  multiclass RVVUnsignedReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+  multiclass RVVFloatingReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+  multiclass RVVFloatingWidenReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+}
+
+multiclass RVVIntReductionBuiltinSet
+    : RVVSignedReductionBuiltin,
+      RVVUnsignedReductionBuiltin;
+
 // For widen operation which has different mangling name.
 multiclass RVVWidenBuiltinSet> suffixes_prototypes> {
@@ -961,3 +984,33 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_f">;
   def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_rod_f">;
 }
+
+// 15. Vector Reduction Operations
+// 15.1. Vector Single-Width Integer Reduction Instructions
+defm vredsum : RVVIntReductionBuiltinSet;
+defm vredmaxu : RVVUnsignedReductionBuiltin;
+defm vredmax : RVVSignedReductionBuiltin;
+defm vredminu : RVVUnsignedReductionBuiltin;
+defm vredmin : RVVSignedReductionBuiltin;
+defm vredand : RVVIntReductionBuiltinSet;
+defm vredor : RVVIntReductionBuiltinSet;
+defm vredxor : RVVIntReductionBuiltinSet;
+
+// 15.2. Vector Widening Integer Reduction Instructions
+// Vector Widening Integer Reduction Operations
+let HasMaskedOffOperand = false in {
+  defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
+                                      [["vs", "vSw", "SwSwvSw"]]>;
+  defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
+                                       [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+}
+
+// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+defm vfredmax : RVVFloatingReductionBuiltin;
+defm vfredmin : RVVFloatingReductionBuiltin;
+defm vfredsum : RVVFloatingReductionBuiltin;
+defm vfredosum : RVVFloatingReductionBuiltin;
+
+// 15.4.
Vector Widening Floating-Point Reduction Instructions +defm vfwredsum : RVVFloatingWidenReductionBuiltin; +defm vfwredosum : RVVFloatingWidenReductionBuiltin; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c new file mode 100644 index 0000000..c852f99 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c @@ -0,0 +1,291 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c new file mode 100644 index 0000000..daf7cd7 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c @@ -0,0 +1,291 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, 
vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c new file mode 100644 index 0000000..5d03271 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c @@ -0,0 +1,579 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t 
dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat64m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
new file mode 100644
index 0000000..e1339bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -0,0 +1,331 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                           vfloat32m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat32mf2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat32m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst, + vfloat32mf2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst, + vfloat32m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat32mf2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat32m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c new file mode 100644 index 0000000..17b893e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m 
-target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: 
@test_vredand_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredand_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredand_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} 
+ +// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, 
size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c new file mode 100644 index 0000000..e6b215d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { 
+ return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// 
CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + 
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + 
return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + 
vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c new file mode 100644 index 0000000..cefff41 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredmin_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + 
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t 
vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c new file mode 100644 index 0000000..034b7ef --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, 
vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: 
@test_vredor_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t 
scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + 
return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c new file mode 100644 index 0000000..6fdadc8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t 
vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, 
size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(dst, 
vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// 
CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + 
vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c new file mode 100644 index 0000000..3c9729a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// 
REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredxor_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t 
test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// 
CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t 
vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c new file mode 100644 index 0000000..ef6d793 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c @@ -0,0 +1,1124 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwredsum_vs_i8mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: 
@test_vwredsumu_vs_u32m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint8mf8_t vector, vint16m1_t scalar, + size_t vl) 
{ + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint8mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint8mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint8m1_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint8m2_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint8m4_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst, + vint8m8_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint16mf4_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint16mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint16m1_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint16m2_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint16m4_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst, + vint16m8_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint32mf2_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint32m1_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint32m2_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint32m4_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst, + vint32m8_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint8mf8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint8mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint8mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint8m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint8m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst, + 
vuint8m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst, + vuint8m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint16mf4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint16mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint16m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint16m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint16m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst, + vuint16m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint32mf2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint32m1_t vector, + vuint64m1_t scalar, 
size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint32m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint32m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst, + vuint32m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c new file mode 100644 index 0000000..13ab0a1 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c @@ -0,0 +1,291 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: 
@test_vfredmax_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmax_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c new file mode 100644 index 0000000..6e4b0b1 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c @@ -0,0 +1,291 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d 
-target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredmin_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c new file mode 100644 index 0000000..190a6e3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c @@ -0,0 +1,579 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f 
-target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfredsum_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return 
vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, + vfloat32mf2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, + vfloat32m1_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, + vfloat32m2_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, + vfloat32m4_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, + vfloat32m8_t vector, + vfloat32m1_t scalar, size_t vl) { + return vfredosum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, 
vfloat64m1_t dst, + vfloat64m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat64m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat64m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat64m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c new file mode 100644 index 0000000..c010bcf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c @@ -0,0 +1,171 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning 
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat64m1_t scalar,
size_t vl) { + return vfwredosum_vs_f32m8_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat32mf2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat32m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredosum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c new file mode 100644 index 0000000..023ecd7 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c @@ -0,0 +1,171 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst, + vfloat32mf2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst, + vfloat32m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, + vfloat32mf2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, + vfloat32m1_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, + vfloat32m2_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, + vfloat32m4_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, + vfloat32m8_t vector, + vfloat64m1_t scalar, size_t vl) { + return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c new file mode 100644 index 0000000..268f2e4c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf8_i8m1(dst, vector, 
scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8mf2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m1_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredand_vs_i8m8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredand_vs_i16mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredand_vs_i16m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredand_vs_i32mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m1_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredand_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredand_vs_i32m8_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredand_vs_i64m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8mf2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m1_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredand_vs_u8m8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t 
dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t 
vl) { + return vredand_vs_u32m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1(dst, vector, scalar, 
vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t 
vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredand_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredand_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredand_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredand_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredand_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredand_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredand_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredand_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredand_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredand_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t 
mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredand_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredand_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredand_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredand_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredand_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredand_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredand_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t 
mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredand_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredand_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredand_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// 
CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredand_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c new file mode 100644 index 0000000..4901214 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg 
| FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8mf2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m1_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmax_vs_i8m8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredmax_vs_i16m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t 
test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m1_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredmax_vs_i32m8_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t 
scalar, size_t vl) { + return vredmax_vs_i64m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredmax_vs_i64m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8mf2_u8m1(dst, vector, scalar, vl); +} + +// 
CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m1_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredmaxu_vs_u8m8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, 
vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmax_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredmax_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredmax_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, 
vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredmax_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredmaxu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredmaxu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredmaxu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, 
size_t vl) { + return vredmaxu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c new file mode 100644 index 0000000..e9e8cac3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8mf2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m1_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredmin_vs_i8m8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + 
vint16m1_t scalar, size_t vl) { + return vredmin_vs_i16m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m1_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredmin_vs_i32m8_i32m1(dst, vector, 
scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredmin_vs_i64m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8mf2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m1_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredminu_vs_u8m8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return 
vredmin_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredmin_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredmin_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredmin_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t 
scalar, + size_t vl) { + return vredmin_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredmin_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredminu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredminu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredminu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + 
return vredminu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredminu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c new file mode 100644 index 0000000..c933e4a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +//
CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8mf2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m1_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredor_vs_i8m8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredor_vs_i16mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t 
test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredor_vs_i16m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredor_vs_i32mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m1_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return 
vredor_vs_i32m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredor_vs_i32m8_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredor_vs_i64m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8mf2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m1_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredor_vs_u8m8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredor_vs_u64m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: 
@test_vredor_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, 
vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, vuint16m1_t scalar, + size_t vl) { + return vredor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, vuint32m1_t scalar, + size_t vl) { + return vredor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, vuint64m1_t scalar, + size_t vl) { + return vredor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c new file mode 100644 index 0000000..e574243 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature 
+experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8mf2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m1_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredsum_vs_i8m8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, 
size_t vl) { + return vredsum_vs_i16m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredsum_vs_i16m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m1_i32m1(dst, vector, scalar, vl); +} + 
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredsum_vs_i32m8_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredsum_vs_i64m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8mf2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m1_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredsum_vs_u8m8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredsum_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredsum_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + 
size_t vl) { + return vredsum_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredsum_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredsum_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredsum_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + 
vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredsum_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredsum_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredsum_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredsum_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c new file mode 100644 index 0000000..04de6d0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c @@ -0,0 +1,1372 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8mf2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m1_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m2_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m4_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, + vint8m1_t scalar, size_t vl) { + return vredxor_vs_i8m8_i8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, + vint16m1_t scalar, size_t vl) { + return vredxor_vs_i16m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m1_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, + vint32m1_t scalar, size_t vl) { + return vredxor_vs_i32m8_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, + vint64m1_t scalar, size_t vl) { + return vredxor_vs_i64m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8mf2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m1_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m2_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, + vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m4_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, + 
vuint8m1_t scalar, size_t vl) { + return vredxor_vs_u8m8_u8m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return 
vredxor_vs_u16m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1(dst, vector, scalar, vl); +} + +// 
CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, + vint8mf8_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, + vint8mf4_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, + vint8mf2_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, + vint8m1_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t 
mask, vint8m1_t dst, + vint8m2_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, + vint8m4_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, + vint8m8_t vector, vint8m1_t scalar, + size_t vl) { + return vredxor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint16mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint16mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint16m1_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint16m2_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint16m4_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint16m8_t vector, vint16m1_t scalar, + size_t vl) { + return vredxor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint32mf2_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint32m1_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint32m2_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint32m4_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint32m8_t vector, vint32m1_t scalar, + size_t vl) { + return vredxor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint64m1_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint64m2_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint64m4_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint64m8_t vector, vint64m1_t scalar, + size_t vl) { + return vredxor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, + vuint8mf8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vredxor_vs_u8mf4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, + vuint8mf4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, + vuint8mf2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, + vuint8m1_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, + vuint8m2_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, + vuint8m4_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, + vuint8m8_t vector, vuint8m1_t scalar, + size_t vl) { + return vredxor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint16mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint16mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint16m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint16m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint16m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint16m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vredxor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint32mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint32m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint32m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint32m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint32m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vredxor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint64m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, + vuint64m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m2_u64m1_m(mask, dst, vector, 
scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, + vuint64m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, + vuint64m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c new file mode 100644 index 0000000..3ccba3e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c @@ -0,0 +1,1124 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8mf2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m1_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m2_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m4_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector, + vint16m1_t scalar, size_t vl) { + return vwredsum_vs_i8m8_i16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m1_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m2_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m4_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16m8_i32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m1_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m2_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m4_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32m8_i64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m1_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m2_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m4_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32m8_u64m1(dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst, + vint8mf8_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8mf8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst, + vint8mf4_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8mf4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst, + vint8mf2_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8mf2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst, + vint8m1_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8m1_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst, + vint8m2_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8m2_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst, + vint8m4_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8m4_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst, + vint8m8_t vector, vint16m1_t scalar, + size_t vl) { + return vwredsum_vs_i8m8_i16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst, + vint16mf4_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst, + vint16mf2_t vector, + vint32m1_t scalar, size_t vl) { + return vwredsum_vs_i16mf2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst, + vint16m1_t vector, vint32m1_t scalar, + size_t vl) { + return 
vwredsum_vs_i16m1_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst, + vint16m2_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum_vs_i16m2_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst, + vint16m4_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum_vs_i16m4_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst, + vint16m8_t vector, vint32m1_t scalar, + size_t vl) { + return vwredsum_vs_i16m8_i32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst, + vint32mf2_t vector, + vint64m1_t scalar, size_t vl) { + return vwredsum_vs_i32mf2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst, + vint32m1_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum_vs_i32m1_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst, + vint32m2_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum_vs_i32m2_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst, + vint32m4_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum_vs_i32m4_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst, + vint32m8_t vector, vint64m1_t scalar, + size_t vl) { + return vwredsum_vs_i32m8_i64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst, + vuint8mf8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst, + vuint8mf4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst, + vuint8mf2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8mf2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst, + vuint8m1_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m1_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst, + vuint8m2_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m2_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst, + vuint8m4_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m4_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst, + vuint8m8_t vector, + vuint16m1_t scalar, size_t vl) { + return vwredsumu_vs_u8m8_u16m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst, + vuint16mf4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst, + vuint16mf2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16mf2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst, + vuint16m1_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m1_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst, + vuint16m2_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m2_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst, + vuint16m4_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m4_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst, + vuint16m8_t vector, + vuint32m1_t scalar, size_t vl) { + return vwredsumu_vs_u16m8_u32m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst, + vuint32mf2_t vector, + vuint64m1_t scalar, size_t vl) { + return vwredsumu_vs_u32mf2_u64m1_m(mask, dst, vector, scalar, vl); +} + +// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint32m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint32m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint32m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst,
+ vuint32m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
-- 
2.7.4