From fed1503e855a1e3cf936fa0866f099bf1c8c9416 Mon Sep 17 00:00:00 2001 From: Fraser Cormack Date: Mon, 14 Jun 2021 11:00:25 +0100 Subject: [PATCH] [RISCV][VP] Lower FP VP ISD nodes to RVV instructions With the exception of `frem`, this patch supports the current set of VP floating-point binary intrinsics by lowering them to RVV instructions. It does so by using the existing `RISCVISD *_VL` custom nodes as an intermediate layer. Both scalable and fixed-length vectors are supported by using this method. The `frem` node is unsupported due to a lack of available instructions. For fixed-length vectors we could scalarize but that option is not (currently) available for scalable-vector types. The support is intentionally left out so it is equivalent for both vector types. The matching of vector/scalar forms is currently lacking, as scalable vector types do not lower to the custom `VFMV_V_F_VL` node. We could either make floating-point scalable vector splats lower to this node, or support the matching of multiple kinds of splat via a `ComplexPattern`, much like we do for integer types. 
Reviewed By: rogfer01 Differential Revision: https://reviews.llvm.org/D104237 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 17 + .../CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll | 629 ++++++++++++++++ .../CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll | 629 ++++++++++++++++ .../CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll | 629 ++++++++++++++++ .../CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll | 365 +++++++++ .../CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll | 365 +++++++++ .../CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll | 629 ++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 815 +++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 815 +++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 815 +++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll | 485 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll | 485 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 815 +++++++++++++++++++++ 13 files changed, 7493 insertions(+) create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 45f7f10..32046a4 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ 
b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -420,6 +420,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL}; + static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB, + ISD::VP_FMUL, ISD::VP_FDIV}; + if (!Subtarget.is64Bit()) { // We must custom-lower certain vXi64 operations on RV32 due to the vector // element type being illegal. @@ -603,6 +606,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); + + for (unsigned VPOpc : FloatingPointVPOps) + setOperationAction(VPOpc, VT, Custom); }; // Sets common extload/truncstore actions on RVV floating-point vector @@ -797,6 +803,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); + + for (unsigned VPOpc : FloatingPointVPOps) + setOperationAction(VPOpc, VT, Custom); } // Custom-legalize bitcasts from fixed-length vectors to scalar types. 
@@ -2494,6 +2503,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, return lowerVPOp(Op, DAG, RISCVISD::SRL_VL); case ISD::VP_SHL: return lowerVPOp(Op, DAG, RISCVISD::SHL_VL); + case ISD::VP_FADD: + return lowerVPOp(Op, DAG, RISCVISD::FADD_VL); + case ISD::VP_FSUB: + return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL); + case ISD::VP_FMUL: + return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL); + case ISD::VP_FDIV: + return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL); } } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll new file mode 100644 index 0000000..e6ed948 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fadd.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfadd_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> 
zeroinitializer + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfadd_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfadd_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fadd.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; 
CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfadd_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fadd.v16f16(<16 x half>, <16 x half>, <16 x i1>, 
i32) + +define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfadd_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> 
undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fadd.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfadd_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfadd_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f32_unmasked: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfadd_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> 
zeroinitializer + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfadd_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, 
ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fadd.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfadd_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v 
= call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fadd.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfadd_vv_v2f64_unmasked(<2 x double> %va, <2 
x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fadd.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f64: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfadd_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> 
@llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fadd.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfadd_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; 
CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fadd.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfadd_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfadd_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = 
call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfadd_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll new file mode 100644 index 0000000..87aaff0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> 
@vfdiv_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f16: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfdiv_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x 
half> %v +} + +declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfdiv_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer 
+ %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfdiv_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfdiv_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfdiv_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x 
float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfdiv_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f32: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfdiv_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x 
i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfdiv_vv_v16f32_unmasked(<16 x float> %va, <16 x 
float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfdiv_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfdiv_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x 
double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfdiv_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; 
CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfdiv_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x 
double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfdiv_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, 
m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll new file mode 100644 index 0000000..259fbfb --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fmul.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 
zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfmul_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> 
@llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfmul_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, 
i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fmul.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfmul_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half 
%b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fmul.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfmul_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half 
%b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fmul.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfmul_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { 
+; CHECK-LABEL: vfmul_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfmul_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = 
shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fmul.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> 
@vfmul_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fmul.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; 
CHECK-LABEL: vfmul_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfmul_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x 
i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fmul.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfmul_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu 
+; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fmul.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfmul_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call 
<4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fmul.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfmul_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, 
m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fmul.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfmul_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> 
undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfmul_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfmul_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll new file mode 100644 index 0000000..5745e03 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: 
-verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfrdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfrdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfrdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + 
%elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfrdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfrdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfrdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> 
undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfrdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfrdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfrdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfrdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfrdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfrdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf 
v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfrdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfrdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfrdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 
zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfrdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfrdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> 
@vfrdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfrdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfrdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %vb, 
<4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfrdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfrdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfrdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = 
shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfrdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll new file mode 100644 index 0000000..8762766 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfrsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, 
v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfrsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfrsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfrsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = 
shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfrsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfrsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfrsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: 
vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfrsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfrsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfrsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, 
mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfrsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfrsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfrsub_vf_v8f32(<8 x float> %va, float %b, 
<8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfrsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfrsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> 
@vfrsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfrsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfrsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> 
%vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfrsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfrsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfrsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x 
double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfrsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfrsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfrsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, 
<16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll new file mode 100644 index 0000000..ee04b73 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x 
half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = 
insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> 
@vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f16: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x 
half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfsub_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, 
float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfsub_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> 
@vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfsub_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = 
insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfsub_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> 
@vfsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfsub_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; 
CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x 
double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfsub_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fsub.v8f64(<8 x 
double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfsub_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> 
zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfsub_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> 
@vfsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll new file mode 100644 index 0000000..2f33166 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fadd.nxv1f16(, , , i32) + +define @vfadd_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + 
+define @vfadd_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv2f16(, , , i32) + +define @vfadd_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; 
CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv4f16(, , , i32) + +define @vfadd_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, 
half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv8f16(, , , i32) + +define @vfadd_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define 
@vfadd_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv16f16(, , , i32) + +define @vfadd_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, 
ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv32f16(, , , i32) + +define @vfadd_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = 
insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv1f32(, , , i32) + +define @vfadd_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, 
undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv2f32(, , , i32) + +define @vfadd_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv4f32(, , , i32) + +define @vfadd_vv_nxv4f32( %va, %b, 
%m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv8f32(, , , i32) + +define @vfadd_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; 
CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv16f32(, , , i32) + +define @vfadd_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfadd_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv1f64(, , , i32) + +define @vfadd_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = 
insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv2f64(, , , i32) + +define @vfadd_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define 
@vfadd_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv4f64(, , , i32) + +define @vfadd_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; 
CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fadd.nxv8f64(, , , i32) + +define @vfadd_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fadd.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement 
undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfadd_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fadd.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll new file mode 100644 index 0000000..c5f219c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fdiv.nxv1f16(, , , i32) + +define @vfdiv_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, 
zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv2f16(, , , i32) + +define @vfdiv_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfdiv_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv4f16(, , , i32) + +define @vfdiv_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; 
CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv8f16(, , , i32) + +define @vfdiv_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call 
@llvm.vp.fdiv.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv16f16(, , , i32) + +define @vfdiv_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfdiv_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv32f16(, , , i32) + +define @vfdiv_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, 
e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv1f32(, , , i32) + +define @vfdiv_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, 
zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv2f32(, , , i32) + +define @vfdiv_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f32( %va, %vb, %m, i32 %evl) + 
ret %v +} + +declare @llvm.vp.fdiv.nxv4f32(, , , i32) + +define @vfdiv_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv8f32(, , , i32) + +define @vfdiv_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f32: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv16f32(, , , i32) + +define @vfdiv_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv16f32( %va, %b, %m, i32 %evl) + 
ret %v +} + +define @vfdiv_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv1f64(, , , i32) + +define @vfdiv_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv2f64(, , , i32) + +define @vfdiv_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, 
zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv4f64(, , , i32) + +define @vfdiv_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { +; 
CHECK-LABEL: vfdiv_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv8f64(, , , i32) + +define @vfdiv_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fdiv.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, 
m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfdiv_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll new file mode 100644 index 0000000..f33fd4a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fmul.nxv1f16(, , , i32) + +define @vfmul_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: 
ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv2f16(, , , i32) + +define @vfmul_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + 
+define @vfmul_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv4f16(, , , i32) + +define @vfmul_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; 
CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv8f16(, , , i32) + +define @vfmul_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, 
half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv16f16(, , , i32) + +define @vfmul_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define 
@vfmul_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv32f16(, , , i32) + +define @vfmul_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, 
m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv1f32(, , , i32) + +define @vfmul_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = 
insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv2f32(, , , i32) + +define @vfmul_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, 
zeroinitializer + %v = call @llvm.vp.fmul.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv4f32(, , , i32) + +define @vfmul_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv8f32(, , , i32) + +define @vfmul_vv_nxv8f32( %va, %b, %m, 
i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv16f32(, , , i32) + +define @vfmul_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; 
CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv1f64(, , , i32) + +define @vfmul_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { +; 
CHECK-LABEL: vfmul_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv2f64(, , , i32) + +define @vfmul_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head 
= insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv4f64(, , , i32) + +define @vfmul_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define 
@vfmul_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmul.nxv8f64(, , , i32) + +define @vfmul_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fmul.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; 
CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfmul_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fmul.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll new file mode 100644 index 0000000..8aa3875 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -0,0 +1,485 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fdiv.nxv1f16(, , , i32) + +define @vfrdiv_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call 
@llvm.vp.fdiv.nxv1f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv2f16(, , , i32) + +define @vfrdiv_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv4f16(, , , i32) + +define @vfrdiv_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, 
ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv8f16(, , , i32) + +define @vfrdiv_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call 
@llvm.vp.fdiv.nxv8f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv16f16(, , , i32) + +define @vfrdiv_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv32f16(, , , i32) + +define @vfrdiv_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv32f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, 
ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv32f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv1f32(, , , i32) + +define @vfrdiv_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv2f32(, , , i32) + +define @vfrdiv_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f32( %vb, %va, %m, i32 
%evl) + ret %v +} + +define @vfrdiv_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv4f32(, , , i32) + +define @vfrdiv_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv8f32(, , , i32) + +define @vfrdiv_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv16f32(, , , i32) + +define @vfrdiv_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv16f32( %vb, 
%va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv1f64(, , , i32) + +define @vfrdiv_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv1f64( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv2f64(, , , i32) + +define @vfrdiv_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: 
vfdiv.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv2f64( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv4f64(, , , i32) + +define @vfrdiv_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrdiv_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv4f64( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fdiv.nxv8f64(, , , i32) + +define @vfrdiv_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f64( %vb, %va, %m, i32 %evl) + ret %v +} + 
+define @vfrdiv_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fdiv.nxv8f64( %vb, %va, %m, i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll new file mode 100644 index 0000000..13f5cd7 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll @@ -0,0 +1,485 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fsub.nxv1f16(, , , i32) + +define @vfrsub_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; 
CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv2f16(, , , i32) + +define @vfrsub_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv4f16(, , , i32) + +define @vfrsub_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f16( %vb, %va, %m, i32 %evl) + ret %v +} + 
+define @vfrsub_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv8f16(, , , i32) + +define @vfrsub_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv16f16(, , , i32) + +define @vfrsub_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli 
zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f16( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv32f16(, , , i32) + +define @vfrsub_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv32f16( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv32f16( %vb, %va, %m, i32 %evl) + ret %v 
+} + +declare @llvm.vp.fsub.nxv1f32(, , , i32) + +define @vfrsub_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv2f32(, , , i32) + +define @vfrsub_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: 
ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv4f32(, , , i32) + +define @vfrsub_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv8f32(, , , i32) + +define @vfrsub_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv8f32_unmasked( %va, 
float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv16f32(, , , i32) + +define @vfrsub_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f32( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f32( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv1f64(, , , i32) + +define @vfrsub_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; 
CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f64( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv2f64(, , , i32) + +define @vfrsub_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f64( %vb, %va, %m, i32 %evl) + ret %v +} + +declare 
@llvm.vp.fsub.nxv4f64(, , , i32) + +define @vfrsub_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f64( %vb, %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv8f64(, , , i32) + +define @vfrsub_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f64( %vb, %va, %m, i32 %evl) + ret %v +} + +define @vfrsub_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: ret + 
%elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f64( %vb, %va, %m, i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll new file mode 100644 index 0000000..cfcfdf8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fsub.nxv1f16(, , , i32) + +define @vfsub_vv_nxv1f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, 
zeroinitializer + %v = call @llvm.vp.fsub.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv2f16(, , , i32) + +define @vfsub_vv_nxv2f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv2f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { +; 
CHECK-LABEL: vfsub_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv4f16(, , , i32) + +define @vfsub_vv_nxv4f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv4f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, 
e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv8f16(, , , i32) + +define @vfsub_vv_nxv8f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer 
+ %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv16f16(, , , i32) + +define @vfsub_vv_nxv16f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv16f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f16( %va, %vb, %m, i32 %evl) + ret %v 
+} + +declare @llvm.vp.fsub.nxv32f16(, , , i32) + +define @vfsub_vv_nxv32f16( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv32f16( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv32f16( %va, half %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, half %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv32f16( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv1f32(, , , i32) + +define @vfsub_vv_nxv1f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f32: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv1f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv2f32(, , , i32) + +define @vfsub_vv_nxv2f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv2f32( %va, %b, %m, i32 %evl) + ret 
%v +} + +define @vfsub_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv2f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv4f32(, , , i32) + +define @vfsub_vv_nxv4f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, 
m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv4f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv8f32(, , , i32) + +define @vfsub_vv_nxv8f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = 
call @llvm.vp.fsub.nxv8f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv8f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv16f32(, , , i32) + +define @vfsub_vv_nxv16f32( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f32( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv16f32( %va, float %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv16f32: 
+; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, float %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv16f32( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv1f64(, , , i32) + +define @vfsub_vv_nxv1f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: 
vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv1f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv2f64(, , , i32) + +define @vfsub_vv_nxv2f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv2f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call 
@llvm.vp.fsub.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv2f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv4f64(, , , i32) + +define @vfsub_vv_nxv4f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv4f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfsub_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv4f64( %va, %vb, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fsub.nxv8f64(, , , i32) + +define @vfsub_vv_nxv8f64( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.fsub.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f64( %va, %b, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vfsub_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, 
ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement undef, double %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.fsub.nxv8f64( %va, %vb, %m, i32 %evl) + ret %v +} -- 2.7.4