; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
<vscale x 1 x half>,
ret <vscale x 8 x i1> %a
}
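+; The f64 test cases below require the D extension, hence the switch from +f
+; to +d in the RUN line.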
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
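+; The .mask intrinsics take (maskedoff, op1, op2, mask, vl); masked-off lanes
+; of the result keep the corresponding maskedoff bits (mu policy).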
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i32);
+
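+; The mask operand is first computed with an unmasked tail-agnostic compare;
+; the masked compare then runs tail-undisturbed (tu) so lanes at and beyond vl
+; keep the maskedoff value seeded into v25.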
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT: vmfeq.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
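+; At SEW=64, nxv2f64 needs LMUL=2, so each operand occupies a two-register
+; group (v8-v9 and v10-v11 below).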
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmfeq.vv v0, v8, v10
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vmfeq.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmfeq.vv v0, v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmfeq.vv v0, v8, v12
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vmfeq.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
<vscale x 1 x half>,
half,
ret <vscale x 8 x i1> %a
}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i32);
+
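+; On RV32 the double scalar argument arrives in the a0/a1 GPR pair (default
+; ilp32 ABI); it is stored to the stack and reloaded with fld to place it in
+; an FPR before the vf compare.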
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmfeq.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmfeq.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmfeq.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfeq.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
<vscale x 1 x half>,
ret <vscale x 8 x i1> %a
}
+
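+; The V extension has no vmfge.vv instruction, so only the vmfge.vf form is
+; tested with f64 here.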
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmfge.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmfge.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmfge.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfge.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
<vscale x 1 x half>,
ret <vscale x 8 x i1> %a
}
+
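+; Likewise vmfgt has only a .vf instruction form; a vector-vector greater-than
+; is obtained by swapping the operands of vmflt.vv.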
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmfgt.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmfgt.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmfgt.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfgt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
<vscale x 1 x half>,
ret <vscale x 8 x i1> %a
}
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT: vmfle.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vmfle.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vmfle.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
<vscale x 1 x half>,
half,
ret <vscale x 8 x i1> %a
}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmfle.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmfle.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmfle.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfle.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
<vscale x 1 x half>,
ret <vscale x 8 x i1> %a
}
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT: vmflt.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vmflt.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vmflt.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
<vscale x 1 x half>,
half,
ret <vscale x 8 x i1> %a
}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmflt.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmflt.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmflt.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmflt.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
<vscale x 1 x half>,
ret <vscale x 8 x i1> %a
}
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT: vmfne.vv v25, v9, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vmfne.vv v25, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vmfne.vv v25, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
<vscale x 1 x half>,
half,
ret <vscale x 8 x i1> %a
}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; CHECK-NEXT: vmfne.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; CHECK-NEXT: vmfne.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; CHECK-NEXT: vmfne.vf v0, v8, ft0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: fld ft0, 8(sp)
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmfne.vf v25, v8, ft0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}