From ad0a7ad950fec5e422e84f0d3f8942d5e1b116f6 Mon Sep 17 00:00:00 2001
From: ShihPo Hung
Date: Tue, 22 Dec 2020 04:50:19 -0800
Subject: [PATCH] [RISCV] Add intrinsics for vf[n]macc/vf[n]msac/vf[n]madd/vf[n]msub
 instructions

This patch defines the vfmacc/vfnmacc, vfmsac/vfnmsac, vfmadd/vfnmadd,
and vfmsub/vfnmsub intrinsics and lowers them to V instructions.

Authored-by: Roger Ferrer Ibanez
Co-Authored-by: ShihPo Hung

Differential Revision: https://reviews.llvm.org/D93691
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td         |    9 +
 llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td |   36 +-
 llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll      |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll      | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll      |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll      | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll      |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll      | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll      |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll      | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll     |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll     | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll     |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll     | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll     |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll     | 1142 +++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll     |  856 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll     | 1142 +++++++++++++++++++++++
 18 files changed, 16024 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index ba0929b..dc080db 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -551,6 +551,15 @@ let TargetPrefix = "riscv" in {
 
   defm vfwmul : RISCVBinaryABX;
 
+  defm vfmacc : RISCVTernaryAAXA;
+  defm vfnmacc : RISCVTernaryAAXA;
+  defm vfmsac : RISCVTernaryAAXA;
+  defm vfnmsac : RISCVTernaryAAXA;
+  defm vfmadd : RISCVTernaryAAXA;
+  defm vfnmadd : RISCVTernaryAAXA;
+  defm vfmsub : RISCVTernaryAAXA;
+  defm vfnmsub : RISCVTernaryAAXA;
+
   defm vfsgnj : RISCVBinaryAAX;
   defm vfsgnjn : RISCVBinaryAAX;
   defm vfsgnjx : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index f3b6d2f..5c858b0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -885,9 +885,10 @@ multiclass VPseudoTernaryV_VX {
     defm _VX : VPseudoTernary;
 }
 
-multiclass VPseudoTernaryV_VX_AAXA {
+multiclass VPseudoTernaryV_VX_AAXA {
   foreach m = MxList.m in
-    defm _VX : VPseudoTernary;
+    defm !if(IsFloat, "_VF", "_VX") : VPseudoTernary;
 }
 
 multiclass VPseudoTernaryW_VV {
@@ -907,9 +908,9 @@ multiclass VPseudoTernaryV_VI {
     defm _VI : VPseudoTernary;
 }
 
-multiclass VPseudoTernaryV_VV_VX_AAXA {
+multiclass VPseudoTernaryV_VV_VX_AAXA {
   defm "" : VPseudoTernaryV_VV;
-  defm "" : VPseudoTernaryV_VX_AAXA;
+  defm "" : VPseudoTernaryV_VX_AAXA;
 }
 
 multiclass VPseudoTernaryV_VX_VI {
@@ -1593,7 +1594,8 @@ multiclass VPatTernaryV_VX vtilist> {
   foreach vti = vtilist in
-    defm : VPatTernary;
@@ -1940,6 +1942,18 @@ defm PseudoVFRDIV : VPseudoBinaryV_VX;
 defm PseudoVFWMUL : VPseudoBinaryW_VV_VX;
 
 //===----------------------------------------------------------------------===//
+// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFMACC : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFNMACC : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFMSAC : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFNMSAC : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFMADD : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFNMADD : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFMSUB : VPseudoTernaryV_VV_VX_AAXA;
+defm PseudoVFNMSUB : VPseudoTernaryV_VV_VX_AAXA;
+
+//===----------------------------------------------------------------------===//
 // 14.12. Vector Floating-Point Sign-Injection Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFSGNJ : VPseudoBinaryV_VV_VX;
@@ -2321,6 +2335,18 @@ defm "" : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;
 defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;
 
 //===----------------------------------------------------------------------===//
+// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;
+
+//===----------------------------------------------------------------------===//
 // 14.12.
Vector Floating-Point Sign-Injection Instructions //===----------------------------------------------------------------------===// defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ", AllFloatVectors>; diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll new file mode 100644 index 0000000..44f0ecb --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll @@ -0,0 +1,856 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vfmacc.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( + , + , + , + , + 
i32); + +define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f16.f16( + , + half, 
+ , + i32); + +define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( + , + half, + , + , + i32); + +define 
@intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f32.f32( + , + float, + , 
+ i32); + +define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll new file mode 100644 index 0000000..6b83445 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll @@ -0,0 +1,1142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmacc.nxv1f16.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f16.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f16.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f16.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, 
e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18 +; CHECK-NEXT: 
jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv8f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv2f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, 
v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.nxv4f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll new file mode 100644 index 0000000..344a21b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll @@ -0,0 +1,856 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmadd.nxv1f16.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f16.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f16.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f16.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); 
+ +define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( + , + , + , + i32); + +define 
@intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define 
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    <vscale x 2 x half> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
new file mode 100644
index 0000000..993f535
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, 
v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv16f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vfmadd.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv8f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv8f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv1f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv1f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv2f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv2f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv2f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv2f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmadd.nxv4f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.nxv4f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmadd.mask.nxv4f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmadd.mask.nxv4f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll new file mode 100644 index 0000000..d86f946 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll @@ -0,0 +1,856 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f16.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vfmsac.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( + , + , + , + , + 
i32); + +define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f16.f16( + , + half, + , + i32); + +define 
@intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( + , + half, + , + , + i32); + +define 
@intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f32.f32( + , + float, + , 
+ i32); + +define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll new file mode 100644 index 0000000..03364ab --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll @@ -0,0 +1,1142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmsac.nxv1f16.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f16.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f16.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f16.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu 
+; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv8f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfmsac.vf v16, ft0, v18 +; 
CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
new file mode 100644
index 0000000..1d8cb06
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half>
@llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vfmsub.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { 
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f16.f16( + , + half, + , + i32); + +define 
@intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( + , + float, + , + , + i32); + +define 
<vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
new file mode 100644
index 0000000..3477318
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+;
CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f16.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f16.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu 
+; CHECK-NEXT: vfmsub.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = 
call @llvm.riscv.vfmsub.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv8f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfmsub.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfmsub.vf v16, 
ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
new file mode 100644
index 0000000..d90664c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half>
@llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( + , + , + , + i32); + +define 
@intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f16.f16( + %0, + half %1, + %2, + 
i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vfnmacc.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll new file mode 100644 index 0000000..9e113de --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll @@ -0,0 +1,1142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh 
-verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f16.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f16.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define 
@intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( + , + , + , + i64); + 
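; The scalable-vector operand types of these declarations are encoded in the
; mangled intrinsic names: a suffix such as `nxv4f32` stands for
; `<vscale x 4 x float>`, `nxv16f16` for `<vscale x 16 x half>`, and the
; `.mask` variants take an additional `<vscale x N x i1>` mask operand ahead
; of the XLEN-sized vector-length operand. Written out in full under that
; convention, the unmasked declaration above reads:
declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i64);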
+define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define 
@intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, 
+ i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f32.f32( + %0, + 
float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv8f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = 
call @llvm.riscv.vfnmacc.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2f64.f64( + , + double, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv2f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4f64.f64( + , + double, + , + i64); + +define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.nxv4f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll new file mode 100644 index 0000000..abea8d6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll @@ -0,0 +1,856 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 
-mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f16.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f16.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f16.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define 
@intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( + , + , + , + i32); + 
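; Note how the two operand orders in this instruction family differ:
; vf[n]macc/vf[n]msac accumulate into the destination
; (vd = +/-(vs1 * vs2) +/- vd), while vf[n]madd/vf[n]msub multiply into it
; (vd = +/-(vd * vs1) +/- vs2). The first IR operand is that destination, so
; the vector-vector test below is expected to lower to a single in-place
; instruction:
;   vsetvli a0, a0, e32,m2,ta,mu
;   vfnmadd.vv v16, v18, v20        ; v16 = -(v16 * v18) - v20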
+define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vfnmadd.mask.nxv2f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16( + %0, + half 
%1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
<vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
new file mode 100644
index 0000000..4b4b813
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmadd.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half>
@intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( + , + , + , + i64); + 
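+; Note for readers (inferred from the tests in this file; the pattern is the
+; same throughout): the unmasked intrinsics take the destination/accumulator,
+; two further operands, and a vector length; the .mask variants add a
+; <vscale x N x i1> mask operand before the vector length, which lowers to the
+; trailing v0.t on the instruction. The vector-length argument (i64 here, i32
+; in the rv32 files) is what the vsetvli in each CHECK block consumes.
+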
+define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define 
@intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vfnmadd.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv16f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + 
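+; Semantics note, per the RVV spec section 14.6 that this patch cites (stated
+; here for the reader; the autogenerated tests do not spell it out): vfnmadd
+; overwrites the multiplicand, computing vd[i] = -(vd[i] * vs1[i]) - vs2[i]
+; for .vv and vd[i] = -(vd[i] * f[rs1]) - vs2[i] for .vf, so the intrinsic's
+; first operand is both an input and the result register (v16 in the checks
+; above), and the scalar operand is first moved into ft0.
+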
+declare @llvm.riscv.vfnmadd.mask.nxv16f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv4f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vfnmadd.mask.nxv4f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8f32.f32( + , + float, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv8f32.f32( + %0, + float %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8f32.f32( + , + float, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32( + %0, + float %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1f64.f64( + , + double, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv1f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2f64.f64( + , + double, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.nxv2f64.f64( + %0, + double %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2f64.f64( + , + double, + , + , + i64); + +define @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64( + %0, + double %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4f64.f64( + , + double, + , + i64); + +define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.d.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmadd.vf v16, ft0, v20 +; 
CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
new file mode 100644
index 0000000..7b09088
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half>
@llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a 
+} + +declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv1f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv1f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vfnmsac.mask.nxv1f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv2f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv4f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv8f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.f16( + %0, + half %1, + %2, + 
%3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16f16.f16( + , + half, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv16f16.f16( + %0, + half %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16f16.f16( + , + half, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.f16( + %0, + half %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv1f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv1f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv2f32.f32( + %0, + float %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2f32.f32( + , + float, + , + , + i32); + +define @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32( + %0, + float %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f32.f32( + , + float, + , + i32); + +define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.w.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
<vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
new file mode 100644
index 0000000..23d83c2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsac.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half>
@llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv4f16.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv8f16.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv16f16.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu +; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} 
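+; Note, per the same spec section cited by this patch: vfnmsac accumulates
+; into vd, computing vd[i] = -(vs1[i] * vs2[i]) + vd[i]. In the m4 (LMUL=4)
+; tests above, the third vector operand apparently does not fit in the vector
+; argument registers, so the generated code first reloads it into v28 with a
+; VLMAX vsetvli (x0 source operand) and vle16.v before the fused operation.
+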
+ +declare @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( + , + , + , + , + i64); + +define 
@intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu +; CHECK-NEXT: vle64.v v28, (a0) +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vfnmsac.nxv1f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv1f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv2f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv4f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4f16.f16( + , + half, + , + , + i64); + +define @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4f16.f16( + %0, + half %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8f16.f16( + , + half, + , + i64); + +define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x ft0, a0 +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vfnmsac.vf v16, ft0, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vfnmsac.nxv8f16.f16( + %0, + half %1, + %2, + i64 %3) + + ret %a +} + +declare 
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
new file mode 100644
index 0000000..4cbeb71
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x half>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x half>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    <vscale x 2 x half> %2,
+    i32 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    i32 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    i32 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
new file mode 100644
index 0000000..c25fee5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x half>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    half %1,
+    <vscale x 1 x half> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x half>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    <vscale x 2 x half> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    <vscale x 2 x half> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    <vscale x 8 x half> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    <vscale x 16 x half> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
+  <vscale x 1 x double>,
+  double,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
+    <vscale x 1 x double> %0,
+    double %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
+  <vscale x 2 x double>,
+  double,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
+    <vscale x 2 x double> %0,
+    double %1,
+    <vscale x 2 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
+  <vscale x 4 x double>,
+  double,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
+    <vscale x 4 x double> %0,
+    double %1,
+    <vscale x 4 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
--
2.7.4