From 98a3ff9d05250807867be3dd82fdb10ee5c4030a Mon Sep 17 00:00:00 2001
From: Zakk Chen
Date: Thu, 8 Apr 2021 08:09:42 -0700
Subject: [PATCH] [RISCV][Clang] Add more RVV Floating-Point intrinsic functions.

Support the following instructions:

1. Vector Widening Floating-Point Add/Subtract Instructions
2. Vector Widening Floating-Point Multiply
3. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
4. Vector Widening Floating-Point Fused Multiply-Add Instructions
5. Vector Floating-Point Compare Instructions

Reviewed By: craig.topper, HsiangKai

Authored-by: Roger Ferrer Ibanez
Co-Authored-by: Zakk Chen

Differential Revision: https://reviews.llvm.org/D99669
---
 clang/include/clang/Basic/riscv_vector.td          | 119 +++--
 .../RISCV/rvv-intrinsics-overloaded/vfmacc.c       | 560 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfmadd.c       | 560 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfmsac.c       | 560 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfmsub.c       | 560 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfnmacc.c      | 561 +++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfnmadd.c      | 561 +++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfnmsac.c      | 561 +++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfnmsub.c      | 561 +++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwadd.c       | 491 ++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwmacc.c      | 255 ++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwmsac.c      | 255 ++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwmul.c       | 251 +++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwnmacc.c     | 255 ++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwnmsac.c     | 255 ++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vfwsub.c       | 491 ++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vmfeq.c        | 548 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vmfge.c        | 272 ++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vmfgt.c        | 272 ++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vmfle.c        | 548 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vmflt.c        | 548 ++++++++++++++++++++
 .../RISCV/rvv-intrinsics-overloaded/vmfne.c        | 548 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c   | 560 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c   | 560 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c   | 560 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c   | 560 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c  | 561 +++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c  | 561 +++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c  | 561 +++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c  | 561 +++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c   | 491 ++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c  | 255 ++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c  | 255 ++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c   | 251 +++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c | 255 ++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c | 255 ++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c   | 491 ++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c    | 548 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c    | 272 ++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c    | 272 ++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c    | 548 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c    | 548 ++++++++++++++++++++
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c    | 548 ++++++++++++++++++++
 43 files changed, 19034 insertions(+), 31 deletions(-)
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
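Usage note (commentary, not part of the diff): the new builtins follow the
calling conventions the existing RVV intrinsics already use. Below is a
minimal sketch of the single-width FMA added here, assuming the
vsetvl/vle32/vse32 intrinsics that are already upstream; the function name
and the strip-mining loop are illustrative only:

    #include <stddef.h>
    #include <riscv_vector.h>

    /* y[i] = a * x[i] + y[i], strip-mined over the hardware vector length. */
    void saxpy(size_t n, float a, const float *x, float *y) {
      for (size_t vl; n > 0; n -= vl, x += vl, y += vl) {
        vl = vsetvl_e32m1(n);                   /* elements handled this pass */
        vfloat32m1_t vx = vle32_v_f32m1(x, vl); /* load a slice of x */
        vfloat32m1_t vy = vle32_v_f32m1(y, vl); /* load a slice of y (the accumulator) */
        vy = vfmacc_vf_f32m1(vy, a, vx, vl);    /* vy += a * vx (added by this patch) */
        vse32_v_f32m1(y, vy, vl);               /* store the result */
      }
    }

As the tests below show, the masked variants take the mask as the first
argument, e.g. vfmacc(mask, acc, op1, op2, vl), and the compare builtins
(vmfeq/vmfne/vmflt/vmfle/vmfgt/vmfge) return a vbool*_t mask rather than a
vector value.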
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 52188ba..fcbc941 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -283,13 +283,6 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
                      ["wx", "Uv", "UvUwz"]]>;
 }
 
-multiclass RVVIntTerBuiltinSet
-    : RVVOutOp1BuiltinSet;
-
 multiclass RVVCarryinBuiltinSet : RVVOutOp1BuiltinSet
+    : RVVBuiltin {
+  let IRName = intrinsic_name;
+  let IRNameMask = intrinsic_name # "_mask";
+  let MangledName = NAME;
+  let IntrinsicTypes = [-1, 0];
+}
+
+let HasMaskedOffOperand = false in {
+  multiclass RVVIntTerBuiltinSet {
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+  multiclass RVVFloatingTerBuiltinSet {
+    defm "" : RVVOutOp1BuiltinSet;
+  }
+}
+
+let HasMaskedOffOperand = false, Log2LMUL = [-1, 0, 1, 2] in {
+  multiclass RVVFloatingWidenTerBuiltinSet {
+    defm "" : RVVOutOp1Op2BuiltinSet;
+  }
+}
+
 multiclass RVVFloatingBinBuiltinSet
     : RVVOutOp1BuiltinSet;
 
-class RVVIntExt
-    : RVVBuiltin {
-  let IRName = intrinsic_name;
-  let IRNameMask = intrinsic_name # "_mask";
-  let MangledName = NAME;
-  let IntrinsicTypes = [-1, 0];
-}
+multiclass RVVFloatingMaskOutBuiltinSet
+    : RVVOp0Op1BuiltinSet;
+
+multiclass RVVFloatingMaskOutVFBuiltinSet
+    : RVVOp0Op1BuiltinSet;
 
 // For widen operation which has different mangling name.
 multiclass RVVWidenBuiltinSet;
+multiclass RVVFloatingWidenBinBuiltinSet
+    : RVVWidenBuiltinSet;
+
+multiclass RVVFloatingWidenOp0BinBuiltinSet
+    : RVVWidenWOp0BuiltinSet;
+
 defvar TypeList = ["c","s","i","l","f","d"];
 defvar EEWList = [["8", "(Log2EEW:3)"],
                   ["16", "(Log2EEW:4)"],
@@ -715,12 +750,10 @@ defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
 }
 
 // 12.13. Vector Single-Width Integer Multiply-Add Instructions
-let HasMaskedOffOperand = false in {
-  defm vmacc : RVVIntTerBuiltinSet;
-  defm vnmsac : RVVIntTerBuiltinSet;
-  defm vmadd : RVVIntTerBuiltinSet;
-  defm vnmsub : RVVIntTerBuiltinSet;
-}
+defm vmacc : RVVIntTerBuiltinSet;
+defm vnmsac : RVVIntTerBuiltinSet;
+defm vmadd : RVVIntTerBuiltinSet;
+defm vnmsub : RVVIntTerBuiltinSet;
 
 // 12.14. Vector Widening Integer Multiply-Add Instructions
 let HasMaskedOffOperand = false,
@@ -770,26 +803,45 @@ defm vnclip : RVVSignedNShiftBuiltinSet;
 
 // 14. Vector Floating-Point Instructions
 
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVFloatingBinBuiltinSet;
-defm vfsub : RVVFloatingBinBuiltinSet;
+defm vfadd : RVVFloatingBinBuiltinSet;
+defm vfsub : RVVFloatingBinBuiltinSet;
 defm vfrsub : RVVFloatingBinVFBuiltinSet;
 
 // 14.3. Vector Widening Floating-Point Add/Subtract Instructions
-// TODO
+// Widening FP add/subtract, 2*SEW = SEW +/- SEW
+defm vfwadd : RVVFloatingWidenBinBuiltinSet;
+defm vfwsub : RVVFloatingWidenBinBuiltinSet;
+// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
+defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
+defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
 
 // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-defm vfmul : RVVFloatingBinBuiltinSet;
-defm vfdiv : RVVFloatingBinBuiltinSet;
+defm vfmul : RVVFloatingBinBuiltinSet;
+defm vfdiv : RVVFloatingBinBuiltinSet;
 defm vfrdiv : RVVFloatingBinVFBuiltinSet;
 
 // 14.5. Vector Widening Floating-Point Multiply
-// TODO
+let Log2LMUL = [-1, 0, 1, 2] in {
+  defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "f",
+                                       [["vv", "w", "wvv"],
+                                        ["vf", "w", "wve"]]>;
+}
 
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
-// TODO
+defm vfmacc : RVVFloatingTerBuiltinSet;
+defm vfnmacc : RVVFloatingTerBuiltinSet;
+defm vfmsac : RVVFloatingTerBuiltinSet;
+defm vfnmsac : RVVFloatingTerBuiltinSet;
+defm vfmadd : RVVFloatingTerBuiltinSet;
+defm vfnmadd : RVVFloatingTerBuiltinSet;
+defm vfmsub : RVVFloatingTerBuiltinSet;
+defm vfnmsub : RVVFloatingTerBuiltinSet;
 
 // 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
-// TODO
+defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
+defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
+defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
+defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
 
 // 14.8. Vector Floating-Point Square-Root Instruction
 // TODO
@@ -805,12 +857,17 @@ defm vfmin : RVVFloatingBinBuiltinSet;
 defm vfmax : RVVFloatingBinBuiltinSet;
 
 // 14.12. Vector Floating-Point Sign-Injection Instructions
-defm vfsgnj : RVVFloatingBinBuiltinSet;
+defm vfsgnj : RVVFloatingBinBuiltinSet;
 defm vfsgnjn : RVVFloatingBinBuiltinSet;
 defm vfsgnjx : RVVFloatingBinBuiltinSet;
 
 // 14.13. Vector Floating-Point Compare Instructions
-// TODO
+defm vmfeq : RVVFloatingMaskOutBuiltinSet;
+defm vmfne : RVVFloatingMaskOutBuiltinSet;
+defm vmflt : RVVFloatingMaskOutBuiltinSet;
+defm vmfle : RVVFloatingMaskOutBuiltinSet;
+defm vmfgt : RVVFloatingMaskOutVFBuiltinSet;
+defm vmfge : RVVFloatingMaskOutVFBuiltinSet;
 
 // 14.14. Vector Floating-Point Classify Instruction
 // TODO
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
new file mode 100644
index 0000000..130f599
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+                                    vfloat32mf2_t op2, size_t vl) {
+  return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t 
op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmacc(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c new file mode 100644 index 0000000..fbc3a42 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c @@ -0,0 +1,560 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature 
+experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, 
size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + 
vfloat32m4_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t 
test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
new file mode 100644
index 0000000..ad19ae8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+//
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vfmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
new file mode 100644
index 0000000..91c393d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vfmsub.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return 
vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] 
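A quick reference for the single-width fused multiply-add family exercised by
these tests, with the sign conventions restated from the RVV specification
(vd is the acc argument, vs1 is op1, vs2 is op2 in the intrinsics above):

  vfmacc : vd = +(vs1 * vs2) + vd     vfnmacc: vd = -(vs1 * vs2) - vd
  vfmsac : vd = +(vs1 * vs2) - vd     vfnmsac: vd = -(vs1 * vs2) + vd
  vfmadd : vd = +(vd * vs1) + vs2     vfnmadd: vd = -(vd * vs1) - vs2
  vfmsub : vd = +(vd * vs1) - vs2     vfnmsub: vd = -(vd * vs1) + vs2

A minimal strip-mined usage sketch for the overloaded vfmsub form tested here;
the vsetvl_e32m8/vle32_v_f32m8/vse32_v_f32m8 helper names are assumptions that
follow the same intrinsic naming scheme and are not something this patch adds:

  #include <riscv_vector.h>

  // acc[i] = acc[i] * x[i] - y[i] for n elements, strip-mined by vsetvl.
  void fmsub_inplace(float *acc, const float *x, const float *y, size_t n) {
    for (size_t vl; n > 0; n -= vl, acc += vl, x += vl, y += vl) {
      vl = vsetvl_e32m8(n);                      // elements handled this pass
      vfloat32m8_t va = vle32_v_f32m8(acc, vl);  // vd: multiplicand and dest
      vfloat32m8_t vx = vle32_v_f32m8(x, vl);    // vs1: multiplier
      vfloat32m8_t vy = vle32_v_f32m8(y, vl);    // vs2: subtrahend
      va = vfmsub(va, vx, vy, vl);               // va = va * vx - vy
      vse32_v_f32m8(acc, va, vl);                // store updated accumulator
    }
  }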
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfmsub_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsub(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c new file mode 100644 index 0000000..816d4af --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c @@ -0,0 +1,561 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ 
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+//
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t 
test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, 
vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+                                     double op1, vfloat64m8_t op2, size_t vl) {
+  return vfnmacc(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
new file mode 100644
index 0000000..7bddabb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+                                     vfloat32mf2_t op2, size_t vl) {
+  return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
+                                     vfloat32mf2_t op2, size_t vl) {
+  return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+                                   vfloat32m1_t op2, size_t vl) {
+  return vfnmadd(acc, op1, op2, vl);
+}
+
+//
CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmadd(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, 
+ size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmadd(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( +// 
CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+                                     double op1, vfloat64m4_t op2, size_t vl) {
+  return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+                                     vfloat64m8_t op1, vfloat64m8_t op2,
+                                     size_t vl) {
+  return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+                                     double op1, vfloat64m8_t op2, size_t vl) {
+  return vfnmadd(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
new file mode 100644
index 0000000..dbffecd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+
vfloat32mf2_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t 
op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, 
vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + double op1, vfloat64m8_t op2, size_t vl) { + return vfnmsac(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c new file mode 100644 index 0000000..ae28fcd --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c @@ -0,0 +1,561 @@ +// NOTE: 
Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2( +// CHECK-RV32-NEXT: entry: +//
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t 
test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsub(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) 
{ + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + double op1, vfloat64m8_t op2, size_t vl) { + return vfnmsub(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c new file mode 100644 index 0000000..e62526b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -0,0 +1,491 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, 
float op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vfwadd_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c new file mode 100644 index 0000000..15e79f5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c @@ -0,0 +1,255 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone 
-emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +//
CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwmacc(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c new file mode 100644 index 0000000..e2148c9 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c @@ -0,0 +1,255 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
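+// An illustrative sketch (hypothetical helper, not one of the generated
+// tests): per lane, the unmasked overloaded vfwmsac used in this file
+// computes roughly acc[i] = op1[i] * op2[i] - acc[i], with the f32 product
+// widened to f64 before the subtract.
+//
+//   vfloat64m8_t widen_msac(vfloat64m8_t acc, vfloat32m4_t a,
+//                           vfloat32m4_t b, size_t vl) {
+//     return vfwmsac(acc, a, b, vl); // overload resolves on operand types
+//   }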
+vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwmsac(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c new file mode 100644 index 0000000..c10067c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c @@ -0,0 +1,251 @@ +// NOTE: Assertions have 
been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +//
CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { + return vfwmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vfwmul(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c new file mode 100644 index 0000000..aa13e27 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c @@ -0,0 +1,255 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
[[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + 
vfloat32m4_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmacc(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + float op1, vfloat32m4_t op2, size_t vl) { + return vfwnmacc(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c new file mode 100644 index 0000000..539f7d6 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c @@ -0,0 +1,255 @@ +// NOTE: Assertions have been 
autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL:
@test_vfwnmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmsac(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t 
test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + float op1, vfloat32m4_t op2, size_t vl) { + return vfwnmsac(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c new file mode 100644 index 0000000..d86264a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c @@ -0,0 +1,491 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_vv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_wv(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_vv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_wv(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf(mask, maskedoff, op1, op2, vl); +} 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c new file mode 100644 index 0000000..ed73e9a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c @@ -0,0 +1,548 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t 
test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfeq(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t 
test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmfeq_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfeq(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c new file mode 100644 index 0000000..5d90b5e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t 
test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i64( 
[[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfge(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfge(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c new file mode 100644 index 0000000..8a3d1da --- 
/dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+  return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+  return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+  return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+//
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfgt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfgt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                    vfloat64m1_t op1, double op2, size_t vl) {
+  return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                    vfloat64m2_t op1, double op2, size_t vl) {
+  return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                    vfloat64m4_t op1, double op2, size_t vl) {
+  return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                  vfloat64m8_t op1, double op2, size_t vl) {
+  return vmfgt(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
new file mode 100644
index 0000000..d0ea180
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL:
@test_vmfle_vv_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i64( 
[[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmfle_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfle(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmfle_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfle(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c new file mode 100644 index 0000000..4825bbb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c @@ -0,0 +1,548 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// 
RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+                                   size_t vl) {
+  return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+                                  size_t vl) {
+  return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+  return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64(
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], 
i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmflt(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t 
maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmflt(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                  vfloat64m8_t op1, double op2, size_t vl) {
+  return vmflt(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
new file mode 100644
index 0000000..4093ca0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+                                   size_t vl) {
+  return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+                                  size_t vl) {
+  return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL:
@test_vmfne_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i32( [[OP1:%.*]], float 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfne(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + 
vfloat64m1_t op1, double op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfne(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                  vfloat64m8_t op1, vfloat64m8_t op2,
+                                  size_t vl) {
+  return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                  vfloat64m8_t op1, double op2, size_t vl) {
+  return vmfne(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
new file mode 100644
index 0000000..e0f83ff
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+                                    vfloat32mf2_t op2, size_t vl) {
+  return vfmacc_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
+                                    vfloat32mf2_t op2, size_t vl) {
+  return vfmacc_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i32(
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmacc_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmacc_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmacc_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmacc_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfmacc_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmacc_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmacc_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmacc_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmacc_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmacc_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmacc_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmacc_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmacc_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmacc_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmacc_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmacc_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmacc_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmacc_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmacc_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmacc_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmacc_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmacc_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmacc_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmacc_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmacc_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmacc_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmacc_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmacc_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmacc_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmacc_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmacc_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]] +// +vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmacc_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmacc_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]] +// +vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmacc_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c new file mode 100644 index 0000000..01ab843 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c @@ -0,0 +1,560 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return
vfmadd_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfmadd_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmadd_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmadd_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmadd_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmadd_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfmadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfmadd_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmadd_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmadd_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmadd_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmadd_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t 
op2, size_t vl) { + return vfmadd_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmadd_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmadd_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmadd_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmadd_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmadd_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmadd_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmadd_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmadd_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmadd_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmadd_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmadd_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmadd_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmadd_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmadd_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmadd_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmadd_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmadd_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmadd_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmadd_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmadd_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmadd_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmadd_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmadd_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmadd_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c new file mode 100644 index 0000000..dcbab0c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c @@ -0,0 +1,560 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d 
-target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfmsac_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfmsac_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmsac_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return vfmsac_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]] +// +vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmsac_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float>
@llvm.riscv.vfmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmsac_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfmsac_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmsac_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmsac_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmsac_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmsac_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmsac_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmsac_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmsac_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfmsac_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmsac_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmsac_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsac_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmsac_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmsac_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmsac_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmsac_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmsac_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmsac_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmsac_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmsac_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmsac_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmsac_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmsac_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmsac_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmsac_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmsac_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmsac_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmsac_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmsac_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsac_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c new file mode 100644 index 0000000..2c0e35c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c @@ -0,0 +1,560 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfmsub_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfmsub_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfmsub_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]] +// +vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, + size_t vl) { + return
vfmsub_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfmsub_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, + size_t vl) { + return vfmsub_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfmsub_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, + size_t vl) { + return vfmsub_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfmsub_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfmsub_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, + size_t vl) { + return vfmsub_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfmsub_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfmsub_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfmsub_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfmsub_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, 
size_t vl) { + return vfmsub_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfmsub_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfmsub_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsub_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfmsub_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfmsub_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfmsub_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfmsub_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfmsub_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfmsub_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfmsub_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfmsub_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfmsub_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfmsub_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfmsub_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfmsub_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vfmsub_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfmsub_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfmsub_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfmsub_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfmsub_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfmsub_vv_f64m8_m(mask, acc, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfmsub_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c new file mode 100644 index 0000000..3040744 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c @@ -0,0 +1,561 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmacc_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmacc_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfnmacc_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmacc_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmacc_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmacc_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmacc_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmacc_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmacc_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmacc_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmacc_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmacc_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmacc_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmacc_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmacc_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmacc_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmacc_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmacc_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, 
vfloat32mf2_t op2, + size_t vl) { + return vfnmacc_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmacc_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmacc_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmacc_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmacc_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmacc_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmacc_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmacc_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmacc_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmacc_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmacc_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmacc_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmacc_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmacc_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmacc_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], 
double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmacc_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfnmacc_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + double op1, vfloat64m8_t op2, size_t vl) { + return vfnmacc_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c new file mode 100644 index 0000000..79cd39d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c @@ -0,0 +1,561 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmadd_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmadd_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfnmadd_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmadd_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmadd_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmadd_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmadd_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmadd_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmadd_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmadd_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmadd_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmadd_vf_f64m1(acc, op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmadd_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmadd_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmadd_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmadd_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmadd_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfnmadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmadd_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmadd_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmadd_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmadd_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmadd_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmadd_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmadd_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmadd_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmadd_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmadd_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmadd_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmadd_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmadd_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmadd_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmadd_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmadd_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmadd_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfnmadd_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + double op1, vfloat64m8_t op2, size_t vl) { + return vfnmadd_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c new file mode 100644 index 0000000..259e60f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c @@ -0,0 +1,561 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck 
--check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsac_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsac_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsac_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsac_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsac_vv_f32m2(acc, op1,
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsac_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsac_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsac_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsac_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsac_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfnmsac_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsac_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsac_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsac_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsac_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsac_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t 
acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsac_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsac_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsac_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsac_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsac_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return 
vfnmsac_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmsac_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmsac_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmsac_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmsac_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, 
float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsac_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmsac_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsac_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmsac_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmsac_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t 
test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmsac_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmsac_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmsac_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmsac_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vfnmsac_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + double op1, vfloat64m8_t op2, size_t vl) { + return vfnmsac_vf_f64m8_m(mask, acc, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c new file mode 100644 index 0000000..a77ee03 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c @@ -0,0 +1,561 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsub_vv_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfnmsub_vf_f32mf2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsub_vv_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfnmsub_vf_f32m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsub_vv_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfnmsub_vf_f32m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsub_vv_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsub_vf_f32m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, + vfloat32m8_t 
op2, size_t vl) { + return vfnmsub_vv_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsub_vf_f32m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsub_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1, + vfloat64m1_t op2, size_t vl) { + return vfnmsub_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsub_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1, + vfloat64m2_t op2, size_t vl) { + return vfnmsub_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsub_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1, + vfloat64m4_t op2, size_t vl) { + return vfnmsub_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsub_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1, + vfloat64m8_t op2, size_t vl) { + return vfnmsub_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsub_vv_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, + float op1, vfloat32mf2_t op2, + size_t vl) { + return vfnmsub_vf_f32mf2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfnmsub_vv_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfnmsub_vf_f32m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfnmsub_vv_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfnmsub_vf_f32m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfnmsub_vv_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfnmsub_vf_f32m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vfnmsub_vv_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, + vfloat32m8_t op2, size_t vl) { + return vfnmsub_vf_f32m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vfnmsub_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + double op1, vfloat64m1_t op2, size_t vl) { + return vfnmsub_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vfnmsub_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + double op1, vfloat64m2_t op2, size_t vl) { + return vfnmsub_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vfnmsub_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + double op1, vfloat64m4_t op2, size_t vl) { + return vfnmsub_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+                                     vfloat64m8_t op1, vfloat64m8_t op2,
+                                     size_t vl) {
+  return vfnmsub_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+                                     double op1, vfloat64m8_t op2, size_t vl) {
+  return vfnmsub_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
new file mode 100644
index 0000000..4f5ec65
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
@@ -0,0 +1,491 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+                                  size_t vl) {
+  return vfwadd_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfwadd_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: 
@test_vfwadd_wv_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwadd_wv_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_vv_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_wv_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t 
vl) { + return vfwadd_wf_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_vv_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_wv_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_vv_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_wv_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { 
+ return vfwadd_wv_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfwadd_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c new file mode 100644 index 0000000..0693c16 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c @@ -0,0 +1,255 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature 
+f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+                                   vfloat32mf2_t op2, size_t vl) {
+  return vfwmacc_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
+                                   vfloat32mf2_t op2, size_t vl) {
+  return vfwmacc_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+                                   vfloat32m1_t op2, size_t vl) {
+  return vfwmacc_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
+                                   vfloat32m1_t op2, size_t vl) {
+  return vfwmacc_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+                                   vfloat32m2_t op2, size_t vl) {
+  return vfwmacc_vv_f64m4(acc, op1, op2, vl);
+}
+
+// 
CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwmacc_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwmacc_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwmacc_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmacc_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwmacc_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmacc_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwmacc_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmacc_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfwmacc_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { 
+  return vfwmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
+                                     vfloat32m4_t op2, size_t vl) {
+  return vfwmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
new file mode 100644
index 0000000..cec278e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+                                   vfloat32mf2_t op2, size_t vl) {
+  return vfwmsac_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
+                                   vfloat32mf2_t op2, size_t vl) {
+  return vfwmsac_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfwmsac_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwmsac_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwmsac_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwmsac_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwmsac_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float 
op1, + vfloat32m4_t op2, size_t vl) { + return vfwmsac_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmsac_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwmsac_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmsac_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwmsac_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+                                     vfloat32m2_t op1, vfloat32m2_t op2,
+                                     size_t vl) {
+  return vfwmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+                                     float op1, vfloat32m2_t op2, size_t vl) {
+  return vfwmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+                                     vfloat32m4_t op1, vfloat32m4_t op2,
+                                     size_t vl) {
+  return vfwmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
+                                     vfloat32m4_t op2, size_t vl) {
+  return vfwmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
new file mode 100644
index 0000000..b40af3e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -0,0 +1,251 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1(
+// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmul_vv_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmul_vv_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmul_vv_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t 
op1, float op2, size_t vl) { + return vfwmul_vf_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwmul_vv_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwmul_vv_f64m2_m(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vfwmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
new file mode 100644
index 0000000..4d15978
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+//
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmacc_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmacc_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmacc_vf_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmacc_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmacc_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwnmacc_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwnmacc_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwnmacc_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwnmacc_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwnmacc_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwnmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ float op1, vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
new file mode 100644
index 0000000..44bd012
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:
ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, + vfloat32mf2_t op2, size_t vl) { + return vfwnmsac_vv_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1, + vfloat32mf2_t op2, size_t vl) { + return vfwnmsac_vf_f64m1(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmsac_vv_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1, + vfloat32m1_t op2, size_t vl) { + return vfwnmsac_vf_f64m2(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmsac_vv_f64m4(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1, + vfloat32m2_t op2, size_t vl) { + return vfwnmsac_vf_f64m4(acc, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmsac_vv_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1, + vfloat32m4_t op2, size_t vl) { + return vfwnmsac_vf_f64m8(acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwnmsac_vv_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, + float op1, vfloat32mf2_t op2, size_t vl) { + return vfwnmsac_vf_f64m1_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return 
vfwnmsac_vv_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, + float op1, vfloat32m1_t op2, size_t vl) { + return vfwnmsac_vf_f64m2_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwnmsac_vv_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, + float op1, vfloat32m2_t op2, size_t vl) { + return vfwnmsac_vf_f64m4_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwnmsac_vv_f64m8_m(mask, acc, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ float op1, vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
new file mode 100644
index 0000000..9d75dc942
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -0,0 +1,491 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i32(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:
ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_vv_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_wv_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_vv_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_wv_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_vv_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_wv_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfwsub_wf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, vfloat32mf2_t op2, + size_t vl) { + return vfwsub_wv_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, vfloat32m1_t op2, + size_t vl) { + return vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, vfloat32m2_t op2, + size_t vl) { + return vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
<vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
new file mode 100644
index 0000000..455cdb3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+//
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfeq_vv_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfeq_vv_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfeq_vv_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfeq_vv_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfeq_vv_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfeq_vv_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfeq_vv_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfeq_vv_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t 
test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c new file mode 100644 index 0000000..d252589 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_vf_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfge_vf_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfge_vf_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfge_vf_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfge_vf_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfge_vf_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vmfge_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfge_vf_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfge_vf_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfge_vf_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfge_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfge_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfge_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfge_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfge_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfge_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfge_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfge_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c new file mode 100644 index 0000000..12b3955 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfgt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfgt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return 
vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c new file mode 100644 index 0000000..8ae45a4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c @@ -0,0 +1,548 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfle_vv_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_vf_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfle_vv_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfle_vf_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmfle_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfle_vv_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfle_vf_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfle_vv_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfle_vf_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfle_vv_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return 
vmfle_vf_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfle_vv_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfle_vf_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfle_vv_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfle_vf_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfle_vv_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t 
test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfle_vf_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfle_vv_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfle_vf_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} 
+ +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t 
test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c new file mode 100644 index 0000000..7b87bd9 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c @@ -0,0 +1,548 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmflt_vv_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_vf_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmflt_vv_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.f32.i32( [[OP1:%.*]], float
[[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmflt_vf_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmflt_vv_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmflt_vf_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmflt_vv_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmflt_vf_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmflt_vv_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmflt_vf_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmflt_vv_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmflt_vf_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmflt_vv_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmflt_vf_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmflt_vv_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vmflt_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmflt_vf_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmflt_vv_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmflt_vf_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmflt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmflt_vf_f64m1_b64_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c new file mode 100644 index 0000000..bbfe91c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c @@ -0,0 +1,548 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfne_vv_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_vf_f32mf2_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfne_vv_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { + return vmfne_vf_f32m1_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfne_vv_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { + return vmfne_vf_f32m2_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfne_vv_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { + return vmfne_vf_f32m4_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfne_vv_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { + return vmfne_vf_f32m8_b4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfne_vv_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { + return vmfne_vf_f64m1_b64(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfne_vv_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { + return vmfne_vf_f64m2_b32(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfne_vv_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { + return vmfne_vf_f64m4_b16(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfne_vv_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { + return vmfne_vf_f64m8_b8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat32mf2_t op1, vfloat32mf2_t op2, + size_t vl) { + return vmfne_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, 
vbool64_t maskedoff, + vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, vfloat32m1_t op2, + size_t vl) { + return vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat32m1_t op1, float op2, size_t vl) { + return vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, vfloat32m2_t op2, + size_t vl) { + return vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat32m2_t op1, float op2, size_t vl) { + return vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, vfloat32m4_t op2, + size_t vl) { + return vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat32m4_t op1, float op2, size_t vl) { + return vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, vfloat32m8_t op2, + size_t vl) { + return vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, + vfloat32m8_t op1, float op2, size_t vl) { + return vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, vfloat64m1_t op2, + size_t vl) { + return vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, + vfloat64m1_t op1, double op2, size_t vl) { + return vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, vfloat64m2_t op2, + size_t vl) { + return vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, + vfloat64m2_t op1, double op2, size_t vl) { + return vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, vfloat64m4_t op2, + size_t vl) { + return vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, + vfloat64m4_t op1, double op2, size_t vl) { + return vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, vfloat64m8_t op2, + size_t vl) { + return vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, + vfloat64m8_t op1, double op2, size_t vl) { + return vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +} -- 2.7.4
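
For illustration, a minimal sketch of how the compare intrinsics added here compose with a masked arithmetic operation. This is not part of the patch: boost_below is a hypothetical example function, and it assumes the vsetvl, unit-stride load/store (vle32/vse32), and masked vfadd intrinsics from earlier patches in this series are available. Each element of data[] strictly below threshold gets bonus added; inactive lanes are supplied by the maskedoff operand (here the input vector itself), mirroring the (mask, maskedoff, op1, op2, vl) convention the masked tests above exercise.

#include <riscv_vector.h>
#include <stddef.h>

void boost_below(float *data, size_t n, float threshold, float bonus) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);                      // lanes handled this iteration
    vfloat32m1_t v = vle32_v_f32m1(&data[i], vl);         // load a strip
    vbool32_t lt = vmflt_vf_f32m1_b32(v, threshold, vl);  // mask: v[j] < threshold
    // Masked add: inactive lanes come from maskedoff (v), so they pass through.
    v = vfadd_vf_f32m1_m(lt, v, v, bonus, vl);
    vse32_v_f32m1(&data[i], v, vl);                       // store the strip back
    i += vl;
  }
}

The strip-mined loop lets vsetvl clamp vl on the final partial strip, so no scalar epilogue is needed.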