SDTCisSameNumEltsAs<0, 3>,
SDTCisVT<4, XLenVT>]>>;
+// VSELECT_VL: a vselect with an explicit AVL (vector length) operand.
+// Operand layout: result 0 = selected vector; op 1 = i1 mask vector with the
+// same element count as the result; ops 2/3 = the "true"/"false" vectors
+// (both the result type); op 4 = VL of type XLenVT.
+def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL",
+ SDTypeProfile<1, 4, [SDTCisVec<0>,
+ SDTCisVec<1>,
+ SDTCisSameNumEltsAs<0, 1>,
+ SDTCVecEltisVT<1, i1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<2, 3>,
+ SDTCisVT<4, XLenVT>]>>;
+
def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
SDTCisSameAs<0, 2>,
SDTCVecEltisVT<0, i1>,
defm "" : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm "" : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;
+// 12.16. Vector Integer Merge Instructions
+// vmerge.v*m vd, vs2, vs1/rs1/imm, v0 takes vs1 (or the scalar/immediate)
+// where the mask bit is set, so the vselect "true" operand ($rs1) maps to
+// the vs1/scalar position and the "false" operand ($rs2) to the vs2 position.
+foreach vti = AllIntegerVectors in {
+ def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+ vti.RegClass:$rs1,
+ vti.RegClass:$rs2,
+ (XLenVT (VLOp GPR:$vl)))),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
+ vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
+ GPR:$vl, vti.SEW)>;
+
+ // Splatted GPR "true" operand -> vmerge.vxm.
+ def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+ (SplatPat XLenVT:$rs1),
+ vti.RegClass:$rs2,
+ (XLenVT (VLOp GPR:$vl)))),
+ (!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
+ vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
+
+ // Splatted 5-bit immediate "true" operand -> vmerge.vim.
+ def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
+ (SplatPat_simm5 simm5:$rs1),
+ vti.RegClass:$rs2,
+ (XLenVT (VLOp GPR:$vl)))),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
+ vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
+}
+
// 12.17. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
}
-// 14.16. Vector Floating-Point Move Instruction
foreach fvti = AllFloatVectors in {
+ // Floating-point vselects:
+ // 12.16. Vector Integer Merge Instructions
+ // 14.13. Vector Floating-Point Merge Instruction
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
+ fvti.RegClass:$rs1,
+ fvti.RegClass:$rs2,
+ (XLenVT (VLOp GPR:$vl)))),
+ (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
+ fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
+ GPR:$vl, fvti.SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp fvti.ScalarRegClass:$rs1),
+ fvti.RegClass:$rs2,
+ (XLenVT (VLOp GPR:$vl)))),
+ (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
+ fvti.RegClass:$rs2,
+ (fvti.Scalar fvti.ScalarRegClass:$rs1),
+ VMV0:$vm, GPR:$vl, fvti.SEW)>;
+
+ def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
+ (SplatFPOp (fvti.Scalar fpimm0)),
+ fvti.RegClass:$rs2,
+ (XLenVT (VLOp GPR:$vl)))),
+ (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
+ fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.SEW)>;
+
+ // 14.16. Vector Floating-Point Move Instruction
// If we're splatting fpimm0, use vmv.v.x vd, x0.
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
(fvti.Scalar (fpimm0)), (XLenVT (VLOp GPR:$vl)))),
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK
+
+; Vector-vector integer select: lowered to vmerge.vvm (true operand in vs1 slot).
+define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
+; CHECK-LABEL: vselect_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a4, zero, 8
+; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT: vle32.v v26, (a0)
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a2)
+; CHECK-NEXT: vsetvli a0, a4, e32,m2,ta,mu
+; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
+; CHECK-NEXT: vse32.v v26, (a3)
+; CHECK-NEXT: ret
+ %va = load <8 x i32>, <8 x i32>* %a
+ %vb = load <8 x i32>, <8 x i32>* %b
+ %vcc = load <8 x i1>, <8 x i1>* %cc
+ %vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
+ store <8 x i32> %vsel, <8 x i32>* %z
+ ret void
+}
+
+; Splatted-scalar integer select: lowered to vmerge.vxm.
+define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
+; CHECK-LABEL: vselect_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a4, zero, 8
+; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT: vle32.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a4, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a2)
+; CHECK-NEXT: vsetvli a1, a4, e32,m2,ta,mu
+; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0
+; CHECK-NEXT: vse32.v v26, (a3)
+; CHECK-NEXT: ret
+ %vb = load <8 x i32>, <8 x i32>* %b
+ %ahead = insertelement <8 x i32> undef, i32 %a, i32 0
+ %va = shufflevector <8 x i32> %ahead, <8 x i32> undef, <8 x i32> zeroinitializer
+ %vcc = load <8 x i1>, <8 x i1>* %cc
+ %vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
+ store <8 x i32> %vsel, <8 x i32>* %z
+ ret void
+}
+
+; Splatted 5-bit-immediate select (-1): lowered to vmerge.vim.
+define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
+; CHECK-LABEL: vselect_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a3, zero, 8
+; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vle32.v v26, (a0)
+; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a1)
+; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT: vmerge.vim v26, v26, -1, v0
+; CHECK-NEXT: vse32.v v26, (a2)
+; CHECK-NEXT: ret
+ %vb = load <8 x i32>, <8 x i32>* %b
+ %a = insertelement <8 x i32> undef, i32 -1, i32 0
+ %va = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
+ %vcc = load <8 x i1>, <8 x i1>* %cc
+ %vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
+ store <8 x i32> %vsel, <8 x i32>* %z
+ ret void
+}
+
+; FP vector-vector select: the integer vmerge.vvm handles the FP case too.
+define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
+; CHECK-LABEL: vselect_vv_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a4, zero, 8
+; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT: vle32.v v26, (a0)
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a2)
+; CHECK-NEXT: vsetvli a0, a4, e32,m2,ta,mu
+; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
+; CHECK-NEXT: vse32.v v26, (a3)
+; CHECK-NEXT: ret
+ %va = load <8 x float>, <8 x float>* %a
+ %vb = load <8 x float>, <8 x float>* %b
+ %vcc = load <8 x i1>, <8 x i1>* %cc
+ %vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
+ store <8 x float> %vsel, <8 x float>* %z
+ ret void
+}
+
+; Splatted FP-scalar select: lowered to vfmerge.vfm with the scalar in fa0.
+define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
+; CHECK-LABEL: vselect_vx_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a3, zero, 8
+; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vle32.v v26, (a0)
+; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a1)
+; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT: vfmerge.vfm v26, v26, fa0, v0
+; CHECK-NEXT: vse32.v v26, (a2)
+; CHECK-NEXT: ret
+ %vb = load <8 x float>, <8 x float>* %b
+ %ahead = insertelement <8 x float> undef, float %a, i32 0
+ %va = shufflevector <8 x float> %ahead, <8 x float> undef, <8 x i32> zeroinitializer
+ %vcc = load <8 x i1>, <8 x i1>* %cc
+ %vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
+ store <8 x float> %vsel, <8 x float>* %z
+ ret void
+}
+
+; Splatted +0.0 select: +0.0 is all-zero bits, so vmerge.vim with 0 is used
+; instead of materializing the FP constant in a register.
+define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
+; CHECK-LABEL: vselect_vfpzero_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a3, zero, 8
+; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vle32.v v26, (a0)
+; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a1)
+; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT: vmerge.vim v26, v26, 0, v0
+; CHECK-NEXT: vse32.v v26, (a2)
+; CHECK-NEXT: ret
+ %vb = load <8 x float>, <8 x float>* %b
+ %a = insertelement <8 x float> undef, float 0.0, i32 0
+ %va = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
+ %vcc = load <8 x i1>, <8 x i1>* %cc
+ %vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
+ store <8 x float> %vsel, <8 x float>* %z
+ ret void
+}
+
+; Vector-vector i16 select (16 elements, e16/m2): vmerge.vvm.
+define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
+; CHECK-LABEL: vselect_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a4, zero, 16
+; CHECK-NEXT: vsetvli a5, a4, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a2)
+; CHECK-NEXT: vsetvli a0, a4, e16,m2,ta,mu
+; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
+; CHECK-NEXT: vse16.v v26, (a3)
+; CHECK-NEXT: ret
+ %va = load <16 x i16>, <16 x i16>* %a
+ %vb = load <16 x i16>, <16 x i16>* %b
+ %vcc = load <16 x i1>, <16 x i1>* %cc
+ %vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
+ store <16 x i16> %vsel, <16 x i16>* %z
+ ret void
+}
+
+; Splatted-scalar i16 select: vmerge.vxm.
+define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
+; CHECK-LABEL: vselect_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a4, zero, 16
+; CHECK-NEXT: vsetvli a5, a4, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a4, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a2)
+; CHECK-NEXT: vsetvli a1, a4, e16,m2,ta,mu
+; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0
+; CHECK-NEXT: vse16.v v26, (a3)
+; CHECK-NEXT: ret
+ %vb = load <16 x i16>, <16 x i16>* %b
+ %ahead = insertelement <16 x i16> undef, i16 %a, i32 0
+ %va = shufflevector <16 x i16> %ahead, <16 x i16> undef, <16 x i32> zeroinitializer
+ %vcc = load <16 x i1>, <16 x i1>* %cc
+ %vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
+ store <16 x i16> %vsel, <16 x i16>* %z
+ ret void
+}
+
+; Splatted 5-bit-immediate i16 select (4): vmerge.vim.
+define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
+; CHECK-LABEL: vselect_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a3, zero, 16
+; CHECK-NEXT: vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a0)
+; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vle1.v v0, (a1)
+; CHECK-NEXT: vsetvli a0, a3, e16,m2,ta,mu
+; CHECK-NEXT: vmerge.vim v26, v26, 4, v0
+; CHECK-NEXT: vse16.v v26, (a2)
+; CHECK-NEXT: ret
+ %vb = load <16 x i16>, <16 x i16>* %b
+ %a = insertelement <16 x i16> undef, i16 4, i32 0
+ %va = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
+ %vcc = load <16 x i1>, <16 x i1>* %cc
+ %vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
+ store <16 x i16> %vsel, <16 x i16>* %z
+ ret void
+}
+
+; Vector-vector half select (32 elements, e16/m4): vmerge.vvm.
+define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
+; CHECK-LABEL: vselect_vv_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a4, zero, 32
+; CHECK-NEXT: vsetvli a5, a4, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a0, a4, e8,m2,ta,mu
+; CHECK-NEXT: vle1.v v0, (a2)
+; CHECK-NEXT: vsetvli a0, a4, e16,m4,ta,mu
+; CHECK-NEXT: vmerge.vvm v28, v8, v28, v0
+; CHECK-NEXT: vse16.v v28, (a3)
+; CHECK-NEXT: ret
+ %va = load <32 x half>, <32 x half>* %a
+ %vb = load <32 x half>, <32 x half>* %b
+ %vcc = load <32 x i1>, <32 x i1>* %cc
+ %vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
+ store <32 x half> %vsel, <32 x half>* %z
+ ret void
+}
+
+; Splatted half-scalar select: vfmerge.vfm (requires zfh for the scalar half).
+define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
+; CHECK-LABEL: vselect_vx_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a3, zero, 32
+; CHECK-NEXT: vsetvli a4, a3, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a3, e8,m2,ta,mu
+; CHECK-NEXT: vle1.v v0, (a1)
+; CHECK-NEXT: vsetvli a0, a3, e16,m4,ta,mu
+; CHECK-NEXT: vfmerge.vfm v28, v28, fa0, v0
+; CHECK-NEXT: vse16.v v28, (a2)
+; CHECK-NEXT: ret
+ %vb = load <32 x half>, <32 x half>* %b
+ %ahead = insertelement <32 x half> undef, half %a, i32 0
+ %va = shufflevector <32 x half> %ahead, <32 x half> undef, <32 x i32> zeroinitializer
+ %vcc = load <32 x i1>, <32 x i1>* %cc
+ %vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
+ store <32 x half> %vsel, <32 x half>* %z
+ ret void
+}
+
+; Splatted +0.0 half select: all-zero bit pattern, so vmerge.vim with 0.
+define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
+; CHECK-LABEL: vselect_vfpzero_v32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a3, zero, 32
+; CHECK-NEXT: vsetvli a4, a3, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a3, e8,m2,ta,mu
+; CHECK-NEXT: vle1.v v0, (a1)
+; CHECK-NEXT: vsetvli a0, a3, e16,m4,ta,mu
+; CHECK-NEXT: vmerge.vim v28, v28, 0, v0
+; CHECK-NEXT: vse16.v v28, (a2)
+; CHECK-NEXT: ret
+ %vb = load <32 x half>, <32 x half>* %b
+ %a = insertelement <32 x half> undef, half 0.0, i32 0
+ %va = shufflevector <32 x half> %a, <32 x half> undef, <32 x i32> zeroinitializer
+ %vcc = load <32 x i1>, <32 x i1>* %cc
+ %vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
+ store <32 x half> %vsel, <32 x half>* %z
+ ret void
+}